diff options
Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig | 3 | ||||
-rw-r--r-- | lib/Kconfig.debug | 41 | ||||
-rw-r--r-- | lib/Makefile | 1 | ||||
-rw-r--r-- | lib/bitmap.c | 19 | ||||
-rw-r--r-- | lib/btree.c | 797 | ||||
-rw-r--r-- | lib/crc32.c | 30 | ||||
-rw-r--r-- | lib/debug_locks.c | 1 | ||||
-rw-r--r-- | lib/hweight.c | 7 | ||||
-rw-r--r-- | lib/idr.c | 12 | ||||
-rw-r--r-- | lib/kobject.c | 6 | ||||
-rw-r--r-- | lib/kobject_uevent.c | 2 | ||||
-rw-r--r-- | lib/list_sort.c | 263 | ||||
-rw-r--r-- | lib/lmb.c | 13 | ||||
-rw-r--r-- | lib/radix-tree.c | 24 | ||||
-rw-r--r-- | lib/show_mem.c | 14 | ||||
-rw-r--r-- | lib/string.c | 40 | ||||
-rw-r--r-- | lib/vsprintf.c | 132 | ||||
-rw-r--r-- | lib/zlib_inflate/inffast.c | 72 |
18 files changed, 1237 insertions, 240 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 97b136ff117e..170d8ca901d8 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -160,6 +160,9 @@ config TEXTSEARCH_BM | |||
160 | config TEXTSEARCH_FSM | 160 | config TEXTSEARCH_FSM |
161 | tristate | 161 | tristate |
162 | 162 | ||
163 | config BTREE | ||
164 | boolean | ||
165 | |||
163 | config HAS_IOMEM | 166 | config HAS_IOMEM |
164 | boolean | 167 | boolean |
165 | depends on !NO_IOMEM | 168 | depends on !NO_IOMEM |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 25c3ed594c54..8e5ec5e1ab91 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -355,7 +355,7 @@ config SLUB_STATS | |||
355 | config DEBUG_KMEMLEAK | 355 | config DEBUG_KMEMLEAK |
356 | bool "Kernel memory leak detector" | 356 | bool "Kernel memory leak detector" |
357 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ | 357 | depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ |
358 | (X86 || ARM || PPC || S390) | 358 | (X86 || ARM || PPC || S390 || SUPERH) |
359 | 359 | ||
360 | select DEBUG_FS if SYSFS | 360 | select DEBUG_FS if SYSFS |
361 | select STACKTRACE if STACKTRACE_SUPPORT | 361 | select STACKTRACE if STACKTRACE_SUPPORT |
@@ -499,6 +499,18 @@ config PROVE_LOCKING | |||
499 | 499 | ||
500 | For more details, see Documentation/lockdep-design.txt. | 500 | For more details, see Documentation/lockdep-design.txt. |
501 | 501 | ||
502 | config PROVE_RCU | ||
503 | bool "RCU debugging: prove RCU correctness" | ||
504 | depends on PROVE_LOCKING | ||
505 | default n | ||
506 | help | ||
507 | This feature enables lockdep extensions that check for correct | ||
508 | use of RCU APIs. This is currently under development. Say Y | ||
509 | if you want to debug RCU usage or help work on the PROVE_RCU | ||
510 | feature. | ||
511 | |||
512 | Say N if you are unsure. | ||
513 | |||
502 | config LOCKDEP | 514 | config LOCKDEP |
503 | bool | 515 | bool |
504 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 516 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
@@ -520,6 +532,14 @@ config LOCK_STAT | |||
520 | 532 | ||
521 | For more details, see Documentation/lockstat.txt | 533 | For more details, see Documentation/lockstat.txt |
522 | 534 | ||
535 | This also enables lock events required by "perf lock", | ||
536 | subcommand of perf. | ||
537 | If you want to use "perf lock", you also need to turn on | ||
538 | CONFIG_EVENT_TRACING. | ||
539 | |||
540 | CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. | ||
541 | (CONFIG_LOCKDEP defines "acquire" and "release" events.) | ||
542 | |||
523 | config DEBUG_LOCKDEP | 543 | config DEBUG_LOCKDEP |
524 | bool "Lock dependency engine debugging" | 544 | bool "Lock dependency engine debugging" |
525 | depends on DEBUG_KERNEL && LOCKDEP | 545 | depends on DEBUG_KERNEL && LOCKDEP |
@@ -765,10 +785,22 @@ config RCU_CPU_STALL_DETECTOR | |||
765 | CPUs are delaying the current grace period, but only when | 785 | CPUs are delaying the current grace period, but only when |
766 | the grace period extends for excessive time periods. | 786 | the grace period extends for excessive time periods. |
767 | 787 | ||
768 | Say Y if you want RCU to perform such checks. | 788 | Say N if you want to disable such checks. |
789 | |||
790 | Say Y if you are unsure. | ||
791 | |||
792 | config RCU_CPU_STALL_VERBOSE | ||
793 | bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" | ||
794 | depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU | ||
795 | default n | ||
796 | help | ||
797 | This option causes RCU to printk detailed per-task information | ||
798 | for any tasks that are stalling the current RCU grace period. | ||
769 | 799 | ||
770 | Say N if you are unsure. | 800 | Say N if you are unsure. |
771 | 801 | ||
802 | Say Y if you want to enable such checks. | ||
803 | |||
772 | config KPROBES_SANITY_TEST | 804 | config KPROBES_SANITY_TEST |
773 | bool "Kprobes sanity tests" | 805 | bool "Kprobes sanity tests" |
774 | depends on DEBUG_KERNEL | 806 | depends on DEBUG_KERNEL |
@@ -840,8 +872,7 @@ config DEBUG_FORCE_WEAK_PER_CPU | |||
840 | 872 | ||
841 | config LKDTM | 873 | config LKDTM |
842 | tristate "Linux Kernel Dump Test Tool Module" | 874 | tristate "Linux Kernel Dump Test Tool Module" |
843 | depends on DEBUG_KERNEL | 875 | depends on DEBUG_FS |
844 | depends on KPROBES | ||
845 | depends on BLOCK | 876 | depends on BLOCK |
846 | default n | 877 | default n |
847 | help | 878 | help |
@@ -852,7 +883,7 @@ config LKDTM | |||
852 | called lkdtm. | 883 | called lkdtm. |
853 | 884 | ||
854 | Documentation on how to use the module can be found in | 885 | Documentation on how to use the module can be found in |
855 | drivers/misc/lkdtm.c | 886 | Documentation/fault-injection/provoke-crashes.txt |
856 | 887 | ||
857 | config FAULT_INJECTION | 888 | config FAULT_INJECTION |
858 | bool "Fault-injection framework" | 889 | bool "Fault-injection framework" |
diff --git a/lib/Makefile b/lib/Makefile index 3b0b4a696db9..2e152aed7198 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -41,6 +41,7 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o | |||
41 | obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o | 41 | obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o |
42 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | 42 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o |
43 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o | 43 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o |
44 | obj-$(CONFIG_BTREE) += btree.o | ||
44 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o | 45 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o |
45 | obj-$(CONFIG_DEBUG_LIST) += list_debug.o | 46 | obj-$(CONFIG_DEBUG_LIST) += list_debug.o |
46 | obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o | 47 | obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o |
diff --git a/lib/bitmap.c b/lib/bitmap.c index 11bf49750583..ffb78c916ccd 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
@@ -487,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, | |||
487 | EXPORT_SYMBOL(__bitmap_parse); | 487 | EXPORT_SYMBOL(__bitmap_parse); |
488 | 488 | ||
489 | /** | 489 | /** |
490 | * bitmap_parse_user() | 490 | * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap |
491 | * | 491 | * |
492 | * @ubuf: pointer to user buffer containing string. | 492 | * @ubuf: pointer to user buffer containing string. |
493 | * @ulen: buffer size in bytes. If string is smaller than this | 493 | * @ulen: buffer size in bytes. If string is smaller than this |
@@ -619,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) | |||
619 | EXPORT_SYMBOL(bitmap_parselist); | 619 | EXPORT_SYMBOL(bitmap_parselist); |
620 | 620 | ||
621 | /** | 621 | /** |
622 | * bitmap_pos_to_ord(buf, pos, bits) | 622 | * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap |
623 | * @buf: pointer to a bitmap | 623 | * @buf: pointer to a bitmap |
624 | * @pos: a bit position in @buf (0 <= @pos < @bits) | 624 | * @pos: a bit position in @buf (0 <= @pos < @bits) |
625 | * @bits: number of valid bit positions in @buf | 625 | * @bits: number of valid bit positions in @buf |
@@ -655,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) | |||
655 | } | 655 | } |
656 | 656 | ||
657 | /** | 657 | /** |
658 | * bitmap_ord_to_pos(buf, ord, bits) | 658 | * bitmap_ord_to_pos - find position of n-th set bit in bitmap |
659 | * @buf: pointer to bitmap | 659 | * @buf: pointer to bitmap |
660 | * @ord: ordinal bit position (n-th set bit, n >= 0) | 660 | * @ord: ordinal bit position (n-th set bit, n >= 0) |
661 | * @bits: number of valid bit positions in @buf | 661 | * @bits: number of valid bit positions in @buf |
@@ -733,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src, | |||
733 | bitmap_zero(dst, bits); | 733 | bitmap_zero(dst, bits); |
734 | 734 | ||
735 | w = bitmap_weight(new, bits); | 735 | w = bitmap_weight(new, bits); |
736 | for (oldbit = find_first_bit(src, bits); | 736 | for_each_set_bit(oldbit, src, bits) { |
737 | oldbit < bits; | ||
738 | oldbit = find_next_bit(src, bits, oldbit + 1)) { | ||
739 | int n = bitmap_pos_to_ord(old, oldbit, bits); | 737 | int n = bitmap_pos_to_ord(old, oldbit, bits); |
738 | |||
740 | if (n < 0 || w == 0) | 739 | if (n < 0 || w == 0) |
741 | set_bit(oldbit, dst); /* identity map */ | 740 | set_bit(oldbit, dst); /* identity map */ |
742 | else | 741 | else |
@@ -903,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig, | |||
903 | */ | 902 | */ |
904 | 903 | ||
905 | m = 0; | 904 | m = 0; |
906 | for (n = find_first_bit(relmap, bits); | 905 | for_each_set_bit(n, relmap, bits) { |
907 | n < bits; | ||
908 | n = find_next_bit(relmap, bits, n + 1)) { | ||
909 | /* m == bitmap_pos_to_ord(relmap, n, bits) */ | 906 | /* m == bitmap_pos_to_ord(relmap, n, bits) */ |
910 | if (test_bit(m, orig)) | 907 | if (test_bit(m, orig)) |
911 | set_bit(n, dst); | 908 | set_bit(n, dst); |
@@ -934,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig, | |||
934 | return; | 931 | return; |
935 | bitmap_zero(dst, bits); | 932 | bitmap_zero(dst, bits); |
936 | 933 | ||
937 | for (oldbit = find_first_bit(orig, bits); | 934 | for_each_set_bit(oldbit, orig, bits) |
938 | oldbit < bits; | ||
939 | oldbit = find_next_bit(orig, bits, oldbit + 1)) | ||
940 | set_bit(oldbit % sz, dst); | 935 | set_bit(oldbit % sz, dst); |
941 | } | 936 | } |
942 | EXPORT_SYMBOL(bitmap_fold); | 937 | EXPORT_SYMBOL(bitmap_fold); |
diff --git a/lib/btree.c b/lib/btree.c new file mode 100644 index 000000000000..41859a820218 --- /dev/null +++ b/lib/btree.c | |||
@@ -0,0 +1,797 @@ | |||
1 | /* | ||
2 | * lib/btree.c - Simple In-memory B+Tree | ||
3 | * | ||
4 | * As should be obvious for Linux kernel code, license is GPLv2 | ||
5 | * | ||
6 | * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> | ||
7 | * Bits and pieces stolen from Peter Zijlstra's code, which is | ||
8 | * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com> | ||
9 | * GPLv2 | ||
10 | * | ||
11 | * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch | ||
12 | * | ||
13 | * A relatively simple B+Tree implementation. I have written it as a learning | ||
14 | * exercise to understand how B+Trees work. Turned out to be useful as well. | ||
15 | * | ||
16 | * B+Trees can be used similar to Linux radix trees (which don't have anything | ||
17 | * in common with textbook radix trees, beware). Prerequisite for them working | ||
18 | * well is that access to a random tree node is much faster than a large number | ||
19 | * of operations within each node. | ||
20 | * | ||
21 | * Disks have fulfilled the prerequisite for a long time. More recently DRAM | ||
22 | * has gained similar properties, as memory access times, when measured in cpu | ||
23 | * cycles, have increased. Cacheline sizes have increased as well, which also | ||
24 | * helps B+Trees. | ||
25 | * | ||
26 | * Compared to radix trees, B+Trees are more efficient when dealing with a | ||
27 | * sparsely populated address space. Between 25% and 50% of the memory is | ||
28 | * occupied with valid pointers. When densely populated, radix trees contain | ||
29 | * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2% | ||
30 | * pointers. | ||
31 | * | ||
32 | * This particular implementation stores pointers identified by a long value. | ||
33 | * Storing NULL pointers is illegal, lookup will return NULL when no entry | ||
34 | * was found. | ||
35 | * | ||
36 | * A trick was used that is not commonly found in textbooks. The lowest | ||
37 | * values are to the right, not to the left. All used slots within a node | ||
38 | * are on the left, all unused slots contain NUL values. Most operations | ||
39 | * simply loop once over all slots and terminate on the first NUL. | ||
40 | */ | ||
41 | |||
42 | #include <linux/btree.h> | ||
43 | #include <linux/cache.h> | ||
44 | #include <linux/kernel.h> | ||
45 | #include <linux/slab.h> | ||
46 | #include <linux/module.h> | ||
47 | |||
48 | #define MAX(a, b) ((a) > (b) ? (a) : (b)) | ||
49 | #define NODESIZE MAX(L1_CACHE_BYTES, 128) | ||
50 | |||
51 | struct btree_geo { | ||
52 | int keylen; | ||
53 | int no_pairs; | ||
54 | int no_longs; | ||
55 | }; | ||
56 | |||
57 | struct btree_geo btree_geo32 = { | ||
58 | .keylen = 1, | ||
59 | .no_pairs = NODESIZE / sizeof(long) / 2, | ||
60 | .no_longs = NODESIZE / sizeof(long) / 2, | ||
61 | }; | ||
62 | EXPORT_SYMBOL_GPL(btree_geo32); | ||
63 | |||
64 | #define LONG_PER_U64 (64 / BITS_PER_LONG) | ||
65 | struct btree_geo btree_geo64 = { | ||
66 | .keylen = LONG_PER_U64, | ||
67 | .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64), | ||
68 | .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)), | ||
69 | }; | ||
70 | EXPORT_SYMBOL_GPL(btree_geo64); | ||
71 | |||
72 | struct btree_geo btree_geo128 = { | ||
73 | .keylen = 2 * LONG_PER_U64, | ||
74 | .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64), | ||
75 | .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)), | ||
76 | }; | ||
77 | EXPORT_SYMBOL_GPL(btree_geo128); | ||
78 | |||
79 | static struct kmem_cache *btree_cachep; | ||
80 | |||
81 | void *btree_alloc(gfp_t gfp_mask, void *pool_data) | ||
82 | { | ||
83 | return kmem_cache_alloc(btree_cachep, gfp_mask); | ||
84 | } | ||
85 | EXPORT_SYMBOL_GPL(btree_alloc); | ||
86 | |||
87 | void btree_free(void *element, void *pool_data) | ||
88 | { | ||
89 | kmem_cache_free(btree_cachep, element); | ||
90 | } | ||
91 | EXPORT_SYMBOL_GPL(btree_free); | ||
92 | |||
93 | static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp) | ||
94 | { | ||
95 | unsigned long *node; | ||
96 | |||
97 | node = mempool_alloc(head->mempool, gfp); | ||
98 | memset(node, 0, NODESIZE); | ||
99 | return node; | ||
100 | } | ||
101 | |||
102 | static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n) | ||
103 | { | ||
104 | size_t i; | ||
105 | |||
106 | for (i = 0; i < n; i++) { | ||
107 | if (l1[i] < l2[i]) | ||
108 | return -1; | ||
109 | if (l1[i] > l2[i]) | ||
110 | return 1; | ||
111 | } | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static unsigned long *longcpy(unsigned long *dest, const unsigned long *src, | ||
116 | size_t n) | ||
117 | { | ||
118 | size_t i; | ||
119 | |||
120 | for (i = 0; i < n; i++) | ||
121 | dest[i] = src[i]; | ||
122 | return dest; | ||
123 | } | ||
124 | |||
125 | static unsigned long *longset(unsigned long *s, unsigned long c, size_t n) | ||
126 | { | ||
127 | size_t i; | ||
128 | |||
129 | for (i = 0; i < n; i++) | ||
130 | s[i] = c; | ||
131 | return s; | ||
132 | } | ||
133 | |||
134 | static void dec_key(struct btree_geo *geo, unsigned long *key) | ||
135 | { | ||
136 | unsigned long val; | ||
137 | int i; | ||
138 | |||
139 | for (i = geo->keylen - 1; i >= 0; i--) { | ||
140 | val = key[i]; | ||
141 | key[i] = val - 1; | ||
142 | if (val) | ||
143 | break; | ||
144 | } | ||
145 | } | ||
146 | |||
147 | static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n) | ||
148 | { | ||
149 | return &node[n * geo->keylen]; | ||
150 | } | ||
151 | |||
152 | static void *bval(struct btree_geo *geo, unsigned long *node, int n) | ||
153 | { | ||
154 | return (void *)node[geo->no_longs + n]; | ||
155 | } | ||
156 | |||
157 | static void setkey(struct btree_geo *geo, unsigned long *node, int n, | ||
158 | unsigned long *key) | ||
159 | { | ||
160 | longcpy(bkey(geo, node, n), key, geo->keylen); | ||
161 | } | ||
162 | |||
163 | static void setval(struct btree_geo *geo, unsigned long *node, int n, | ||
164 | void *val) | ||
165 | { | ||
166 | node[geo->no_longs + n] = (unsigned long) val; | ||
167 | } | ||
168 | |||
169 | static void clearpair(struct btree_geo *geo, unsigned long *node, int n) | ||
170 | { | ||
171 | longset(bkey(geo, node, n), 0, geo->keylen); | ||
172 | node[geo->no_longs + n] = 0; | ||
173 | } | ||
174 | |||
175 | static inline void __btree_init(struct btree_head *head) | ||
176 | { | ||
177 | head->node = NULL; | ||
178 | head->height = 0; | ||
179 | } | ||
180 | |||
181 | void btree_init_mempool(struct btree_head *head, mempool_t *mempool) | ||
182 | { | ||
183 | __btree_init(head); | ||
184 | head->mempool = mempool; | ||
185 | } | ||
186 | EXPORT_SYMBOL_GPL(btree_init_mempool); | ||
187 | |||
188 | int btree_init(struct btree_head *head) | ||
189 | { | ||
190 | __btree_init(head); | ||
191 | head->mempool = mempool_create(0, btree_alloc, btree_free, NULL); | ||
192 | if (!head->mempool) | ||
193 | return -ENOMEM; | ||
194 | return 0; | ||
195 | } | ||
196 | EXPORT_SYMBOL_GPL(btree_init); | ||
197 | |||
198 | void btree_destroy(struct btree_head *head) | ||
199 | { | ||
200 | mempool_destroy(head->mempool); | ||
201 | head->mempool = NULL; | ||
202 | } | ||
203 | EXPORT_SYMBOL_GPL(btree_destroy); | ||
204 | |||
205 | void *btree_last(struct btree_head *head, struct btree_geo *geo, | ||
206 | unsigned long *key) | ||
207 | { | ||
208 | int height = head->height; | ||
209 | unsigned long *node = head->node; | ||
210 | |||
211 | if (height == 0) | ||
212 | return NULL; | ||
213 | |||
214 | for ( ; height > 1; height--) | ||
215 | node = bval(geo, node, 0); | ||
216 | |||
217 | longcpy(key, bkey(geo, node, 0), geo->keylen); | ||
218 | return bval(geo, node, 0); | ||
219 | } | ||
220 | EXPORT_SYMBOL_GPL(btree_last); | ||
221 | |||
222 | static int keycmp(struct btree_geo *geo, unsigned long *node, int pos, | ||
223 | unsigned long *key) | ||
224 | { | ||
225 | return longcmp(bkey(geo, node, pos), key, geo->keylen); | ||
226 | } | ||
227 | |||
228 | static int keyzero(struct btree_geo *geo, unsigned long *key) | ||
229 | { | ||
230 | int i; | ||
231 | |||
232 | for (i = 0; i < geo->keylen; i++) | ||
233 | if (key[i]) | ||
234 | return 0; | ||
235 | |||
236 | return 1; | ||
237 | } | ||
238 | |||
239 | void *btree_lookup(struct btree_head *head, struct btree_geo *geo, | ||
240 | unsigned long *key) | ||
241 | { | ||
242 | int i, height = head->height; | ||
243 | unsigned long *node = head->node; | ||
244 | |||
245 | if (height == 0) | ||
246 | return NULL; | ||
247 | |||
248 | for ( ; height > 1; height--) { | ||
249 | for (i = 0; i < geo->no_pairs; i++) | ||
250 | if (keycmp(geo, node, i, key) <= 0) | ||
251 | break; | ||
252 | if (i == geo->no_pairs) | ||
253 | return NULL; | ||
254 | node = bval(geo, node, i); | ||
255 | if (!node) | ||
256 | return NULL; | ||
257 | } | ||
258 | |||
259 | if (!node) | ||
260 | return NULL; | ||
261 | |||
262 | for (i = 0; i < geo->no_pairs; i++) | ||
263 | if (keycmp(geo, node, i, key) == 0) | ||
264 | return bval(geo, node, i); | ||
265 | return NULL; | ||
266 | } | ||
267 | EXPORT_SYMBOL_GPL(btree_lookup); | ||
268 | |||
269 | int btree_update(struct btree_head *head, struct btree_geo *geo, | ||
270 | unsigned long *key, void *val) | ||
271 | { | ||
272 | int i, height = head->height; | ||
273 | unsigned long *node = head->node; | ||
274 | |||
275 | if (height == 0) | ||
276 | return -ENOENT; | ||
277 | |||
278 | for ( ; height > 1; height--) { | ||
279 | for (i = 0; i < geo->no_pairs; i++) | ||
280 | if (keycmp(geo, node, i, key) <= 0) | ||
281 | break; | ||
282 | if (i == geo->no_pairs) | ||
283 | return -ENOENT; | ||
284 | node = bval(geo, node, i); | ||
285 | if (!node) | ||
286 | return -ENOENT; | ||
287 | } | ||
288 | |||
289 | if (!node) | ||
290 | return -ENOENT; | ||
291 | |||
292 | for (i = 0; i < geo->no_pairs; i++) | ||
293 | if (keycmp(geo, node, i, key) == 0) { | ||
294 | setval(geo, node, i, val); | ||
295 | return 0; | ||
296 | } | ||
297 | return -ENOENT; | ||
298 | } | ||
299 | EXPORT_SYMBOL_GPL(btree_update); | ||
300 | |||
301 | /* | ||
302 | * Usually this function is quite similar to normal lookup. But the key of | ||
303 | * a parent node may be smaller than the smallest key of all its siblings. | ||
304 | * In such a case we cannot just return NULL, as we have only proven that no | ||
305 | * key smaller than __key, but larger than this parent key exists. | ||
306 | * So we set __key to the parent key and retry. We have to use the smallest | ||
307 | * such parent key, which is the last parent key we encountered. | ||
308 | */ | ||
309 | void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, | ||
310 | unsigned long *__key) | ||
311 | { | ||
312 | int i, height; | ||
313 | unsigned long *node, *oldnode; | ||
314 | unsigned long *retry_key = NULL, key[geo->keylen]; | ||
315 | |||
316 | if (keyzero(geo, __key)) | ||
317 | return NULL; | ||
318 | |||
319 | if (head->height == 0) | ||
320 | return NULL; | ||
321 | retry: | ||
322 | longcpy(key, __key, geo->keylen); | ||
323 | dec_key(geo, key); | ||
324 | |||
325 | node = head->node; | ||
326 | for (height = head->height ; height > 1; height--) { | ||
327 | for (i = 0; i < geo->no_pairs; i++) | ||
328 | if (keycmp(geo, node, i, key) <= 0) | ||
329 | break; | ||
330 | if (i == geo->no_pairs) | ||
331 | goto miss; | ||
332 | oldnode = node; | ||
333 | node = bval(geo, node, i); | ||
334 | if (!node) | ||
335 | goto miss; | ||
336 | retry_key = bkey(geo, oldnode, i); | ||
337 | } | ||
338 | |||
339 | if (!node) | ||
340 | goto miss; | ||
341 | |||
342 | for (i = 0; i < geo->no_pairs; i++) { | ||
343 | if (keycmp(geo, node, i, key) <= 0) { | ||
344 | if (bval(geo, node, i)) { | ||
345 | longcpy(__key, bkey(geo, node, i), geo->keylen); | ||
346 | return bval(geo, node, i); | ||
347 | } else | ||
348 | goto miss; | ||
349 | } | ||
350 | } | ||
351 | miss: | ||
352 | if (retry_key) { | ||
353 | __key = retry_key; | ||
354 | retry_key = NULL; | ||
355 | goto retry; | ||
356 | } | ||
357 | return NULL; | ||
358 | } | ||
359 | |||
360 | static int getpos(struct btree_geo *geo, unsigned long *node, | ||
361 | unsigned long *key) | ||
362 | { | ||
363 | int i; | ||
364 | |||
365 | for (i = 0; i < geo->no_pairs; i++) { | ||
366 | if (keycmp(geo, node, i, key) <= 0) | ||
367 | break; | ||
368 | } | ||
369 | return i; | ||
370 | } | ||
371 | |||
372 | static int getfill(struct btree_geo *geo, unsigned long *node, int start) | ||
373 | { | ||
374 | int i; | ||
375 | |||
376 | for (i = start; i < geo->no_pairs; i++) | ||
377 | if (!bval(geo, node, i)) | ||
378 | break; | ||
379 | return i; | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * locate the correct leaf node in the btree | ||
384 | */ | ||
385 | static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo, | ||
386 | unsigned long *key, int level) | ||
387 | { | ||
388 | unsigned long *node = head->node; | ||
389 | int i, height; | ||
390 | |||
391 | for (height = head->height; height > level; height--) { | ||
392 | for (i = 0; i < geo->no_pairs; i++) | ||
393 | if (keycmp(geo, node, i, key) <= 0) | ||
394 | break; | ||
395 | |||
396 | if ((i == geo->no_pairs) || !bval(geo, node, i)) { | ||
397 | /* right-most key is too large, update it */ | ||
398 | /* FIXME: If the right-most key on higher levels is | ||
399 | * always zero, this wouldn't be necessary. */ | ||
400 | i--; | ||
401 | setkey(geo, node, i, key); | ||
402 | } | ||
403 | BUG_ON(i < 0); | ||
404 | node = bval(geo, node, i); | ||
405 | } | ||
406 | BUG_ON(!node); | ||
407 | return node; | ||
408 | } | ||
409 | |||
410 | static int btree_grow(struct btree_head *head, struct btree_geo *geo, | ||
411 | gfp_t gfp) | ||
412 | { | ||
413 | unsigned long *node; | ||
414 | int fill; | ||
415 | |||
416 | node = btree_node_alloc(head, gfp); | ||
417 | if (!node) | ||
418 | return -ENOMEM; | ||
419 | if (head->node) { | ||
420 | fill = getfill(geo, head->node, 0); | ||
421 | setkey(geo, node, 0, bkey(geo, head->node, fill - 1)); | ||
422 | setval(geo, node, 0, head->node); | ||
423 | } | ||
424 | head->node = node; | ||
425 | head->height++; | ||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | static void btree_shrink(struct btree_head *head, struct btree_geo *geo) | ||
430 | { | ||
431 | unsigned long *node; | ||
432 | int fill; | ||
433 | |||
434 | if (head->height <= 1) | ||
435 | return; | ||
436 | |||
437 | node = head->node; | ||
438 | fill = getfill(geo, node, 0); | ||
439 | BUG_ON(fill > 1); | ||
440 | head->node = bval(geo, node, 0); | ||
441 | head->height--; | ||
442 | mempool_free(node, head->mempool); | ||
443 | } | ||
444 | |||
445 | static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, | ||
446 | unsigned long *key, void *val, int level, | ||
447 | gfp_t gfp) | ||
448 | { | ||
449 | unsigned long *node; | ||
450 | int i, pos, fill, err; | ||
451 | |||
452 | BUG_ON(!val); | ||
453 | if (head->height < level) { | ||
454 | err = btree_grow(head, geo, gfp); | ||
455 | if (err) | ||
456 | return err; | ||
457 | } | ||
458 | |||
459 | retry: | ||
460 | node = find_level(head, geo, key, level); | ||
461 | pos = getpos(geo, node, key); | ||
462 | fill = getfill(geo, node, pos); | ||
463 | /* two identical keys are not allowed */ | ||
464 | BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0); | ||
465 | |||
466 | if (fill == geo->no_pairs) { | ||
467 | /* need to split node */ | ||
468 | unsigned long *new; | ||
469 | |||
470 | new = btree_node_alloc(head, gfp); | ||
471 | if (!new) | ||
472 | return -ENOMEM; | ||
473 | err = btree_insert_level(head, geo, | ||
474 | bkey(geo, node, fill / 2 - 1), | ||
475 | new, level + 1, gfp); | ||
476 | if (err) { | ||
477 | mempool_free(new, head->mempool); | ||
478 | return err; | ||
479 | } | ||
480 | for (i = 0; i < fill / 2; i++) { | ||
481 | setkey(geo, new, i, bkey(geo, node, i)); | ||
482 | setval(geo, new, i, bval(geo, node, i)); | ||
483 | setkey(geo, node, i, bkey(geo, node, i + fill / 2)); | ||
484 | setval(geo, node, i, bval(geo, node, i + fill / 2)); | ||
485 | clearpair(geo, node, i + fill / 2); | ||
486 | } | ||
487 | if (fill & 1) { | ||
488 | setkey(geo, node, i, bkey(geo, node, fill - 1)); | ||
489 | setval(geo, node, i, bval(geo, node, fill - 1)); | ||
490 | clearpair(geo, node, fill - 1); | ||
491 | } | ||
492 | goto retry; | ||
493 | } | ||
494 | BUG_ON(fill >= geo->no_pairs); | ||
495 | |||
496 | /* shift and insert */ | ||
497 | for (i = fill; i > pos; i--) { | ||
498 | setkey(geo, node, i, bkey(geo, node, i - 1)); | ||
499 | setval(geo, node, i, bval(geo, node, i - 1)); | ||
500 | } | ||
501 | setkey(geo, node, pos, key); | ||
502 | setval(geo, node, pos, val); | ||
503 | |||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | int btree_insert(struct btree_head *head, struct btree_geo *geo, | ||
508 | unsigned long *key, void *val, gfp_t gfp) | ||
509 | { | ||
510 | return btree_insert_level(head, geo, key, val, 1, gfp); | ||
511 | } | ||
512 | EXPORT_SYMBOL_GPL(btree_insert); | ||
513 | |||
514 | static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, | ||
515 | unsigned long *key, int level); | ||
516 | static void merge(struct btree_head *head, struct btree_geo *geo, int level, | ||
517 | unsigned long *left, int lfill, | ||
518 | unsigned long *right, int rfill, | ||
519 | unsigned long *parent, int lpos) | ||
520 | { | ||
521 | int i; | ||
522 | |||
523 | for (i = 0; i < rfill; i++) { | ||
524 | /* Move all keys to the left */ | ||
525 | setkey(geo, left, lfill + i, bkey(geo, right, i)); | ||
526 | setval(geo, left, lfill + i, bval(geo, right, i)); | ||
527 | } | ||
528 | /* Exchange left and right child in parent */ | ||
529 | setval(geo, parent, lpos, right); | ||
530 | setval(geo, parent, lpos + 1, left); | ||
531 | /* Remove left (formerly right) child from parent */ | ||
532 | btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1); | ||
533 | mempool_free(right, head->mempool); | ||
534 | } | ||
535 | |||
536 | static void rebalance(struct btree_head *head, struct btree_geo *geo, | ||
537 | unsigned long *key, int level, unsigned long *child, int fill) | ||
538 | { | ||
539 | unsigned long *parent, *left = NULL, *right = NULL; | ||
540 | int i, no_left, no_right; | ||
541 | |||
542 | if (fill == 0) { | ||
543 | /* Because we don't steal entries from a neighbour, this case | ||
544 | * can happen. Parent node contains a single child, this | ||
545 | * node, so merging with a sibling never happens. | ||
546 | */ | ||
547 | btree_remove_level(head, geo, key, level + 1); | ||
548 | mempool_free(child, head->mempool); | ||
549 | return; | ||
550 | } | ||
551 | |||
552 | parent = find_level(head, geo, key, level + 1); | ||
553 | i = getpos(geo, parent, key); | ||
554 | BUG_ON(bval(geo, parent, i) != child); | ||
555 | |||
556 | if (i > 0) { | ||
557 | left = bval(geo, parent, i - 1); | ||
558 | no_left = getfill(geo, left, 0); | ||
559 | if (fill + no_left <= geo->no_pairs) { | ||
560 | merge(head, geo, level, | ||
561 | left, no_left, | ||
562 | child, fill, | ||
563 | parent, i - 1); | ||
564 | return; | ||
565 | } | ||
566 | } | ||
567 | if (i + 1 < getfill(geo, parent, i)) { | ||
568 | right = bval(geo, parent, i + 1); | ||
569 | no_right = getfill(geo, right, 0); | ||
570 | if (fill + no_right <= geo->no_pairs) { | ||
571 | merge(head, geo, level, | ||
572 | child, fill, | ||
573 | right, no_right, | ||
574 | parent, i); | ||
575 | return; | ||
576 | } | ||
577 | } | ||
578 | /* | ||
579 | * We could also try to steal one entry from the left or right | ||
580 | * neighbor. By not doing so we changed the invariant from | ||
581 | * "all nodes are at least half full" to "no two neighboring | ||
582 | * nodes can be merged". Which means that the average fill of | ||
583 | * all nodes is still half or better. | ||
584 | */ | ||
585 | } | ||
586 | |||
587 | static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, | ||
588 | unsigned long *key, int level) | ||
589 | { | ||
590 | unsigned long *node; | ||
591 | int i, pos, fill; | ||
592 | void *ret; | ||
593 | |||
594 | if (level > head->height) { | ||
595 | /* we recursed all the way up */ | ||
596 | head->height = 0; | ||
597 | head->node = NULL; | ||
598 | return NULL; | ||
599 | } | ||
600 | |||
601 | node = find_level(head, geo, key, level); | ||
602 | pos = getpos(geo, node, key); | ||
603 | fill = getfill(geo, node, pos); | ||
604 | if ((level == 1) && (keycmp(geo, node, pos, key) != 0)) | ||
605 | return NULL; | ||
606 | ret = bval(geo, node, pos); | ||
607 | |||
608 | /* remove and shift */ | ||
609 | for (i = pos; i < fill - 1; i++) { | ||
610 | setkey(geo, node, i, bkey(geo, node, i + 1)); | ||
611 | setval(geo, node, i, bval(geo, node, i + 1)); | ||
612 | } | ||
613 | clearpair(geo, node, fill - 1); | ||
614 | |||
615 | if (fill - 1 < geo->no_pairs / 2) { | ||
616 | if (level < head->height) | ||
617 | rebalance(head, geo, key, level, node, fill - 1); | ||
618 | else if (fill - 1 == 1) | ||
619 | btree_shrink(head, geo); | ||
620 | } | ||
621 | |||
622 | return ret; | ||
623 | } | ||
624 | |||
625 | void *btree_remove(struct btree_head *head, struct btree_geo *geo, | ||
626 | unsigned long *key) | ||
627 | { | ||
628 | if (head->height == 0) | ||
629 | return NULL; | ||
630 | |||
631 | return btree_remove_level(head, geo, key, 1); | ||
632 | } | ||
633 | EXPORT_SYMBOL_GPL(btree_remove); | ||
634 | |||
635 | int btree_merge(struct btree_head *target, struct btree_head *victim, | ||
636 | struct btree_geo *geo, gfp_t gfp) | ||
637 | { | ||
638 | unsigned long key[geo->keylen]; | ||
639 | unsigned long dup[geo->keylen]; | ||
640 | void *val; | ||
641 | int err; | ||
642 | |||
643 | BUG_ON(target == victim); | ||
644 | |||
645 | if (!(target->node)) { | ||
646 | /* target is empty, just copy fields over */ | ||
647 | target->node = victim->node; | ||
648 | target->height = victim->height; | ||
649 | __btree_init(victim); | ||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | /* TODO: This needs some optimizations. Currently we do three tree | ||
654 | * walks to remove a single object from the victim. | ||
655 | */ | ||
656 | for (;;) { | ||
657 | if (!btree_last(victim, geo, key)) | ||
658 | break; | ||
659 | val = btree_lookup(victim, geo, key); | ||
660 | err = btree_insert(target, geo, key, val, gfp); | ||
661 | if (err) | ||
662 | return err; | ||
663 | /* We must make a copy of the key, as the original will get | ||
664 | * mangled inside btree_remove. */ | ||
665 | longcpy(dup, key, geo->keylen); | ||
666 | btree_remove(victim, geo, dup); | ||
667 | } | ||
668 | return 0; | ||
669 | } | ||
670 | EXPORT_SYMBOL_GPL(btree_merge); | ||
671 | |||
672 | static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo, | ||
673 | unsigned long *node, unsigned long opaque, | ||
674 | void (*func)(void *elem, unsigned long opaque, | ||
675 | unsigned long *key, size_t index, | ||
676 | void *func2), | ||
677 | void *func2, int reap, int height, size_t count) | ||
678 | { | ||
679 | int i; | ||
680 | unsigned long *child; | ||
681 | |||
682 | for (i = 0; i < geo->no_pairs; i++) { | ||
683 | child = bval(geo, node, i); | ||
684 | if (!child) | ||
685 | break; | ||
686 | if (height > 1) | ||
687 | count = __btree_for_each(head, geo, child, opaque, | ||
688 | func, func2, reap, height - 1, count); | ||
689 | else | ||
690 | func(child, opaque, bkey(geo, node, i), count++, | ||
691 | func2); | ||
692 | } | ||
693 | if (reap) | ||
694 | mempool_free(node, head->mempool); | ||
695 | return count; | ||
696 | } | ||
697 | |||
698 | static void empty(void *elem, unsigned long opaque, unsigned long *key, | ||
699 | size_t index, void *func2) | ||
700 | { | ||
701 | } | ||
702 | |||
703 | void visitorl(void *elem, unsigned long opaque, unsigned long *key, | ||
704 | size_t index, void *__func) | ||
705 | { | ||
706 | visitorl_t func = __func; | ||
707 | |||
708 | func(elem, opaque, *key, index); | ||
709 | } | ||
710 | EXPORT_SYMBOL_GPL(visitorl); | ||
711 | |||
712 | void visitor32(void *elem, unsigned long opaque, unsigned long *__key, | ||
713 | size_t index, void *__func) | ||
714 | { | ||
715 | visitor32_t func = __func; | ||
716 | u32 *key = (void *)__key; | ||
717 | |||
718 | func(elem, opaque, *key, index); | ||
719 | } | ||
720 | EXPORT_SYMBOL_GPL(visitor32); | ||
721 | |||
722 | void visitor64(void *elem, unsigned long opaque, unsigned long *__key, | ||
723 | size_t index, void *__func) | ||
724 | { | ||
725 | visitor64_t func = __func; | ||
726 | u64 *key = (void *)__key; | ||
727 | |||
728 | func(elem, opaque, *key, index); | ||
729 | } | ||
730 | EXPORT_SYMBOL_GPL(visitor64); | ||
731 | |||
732 | void visitor128(void *elem, unsigned long opaque, unsigned long *__key, | ||
733 | size_t index, void *__func) | ||
734 | { | ||
735 | visitor128_t func = __func; | ||
736 | u64 *key = (void *)__key; | ||
737 | |||
738 | func(elem, opaque, key[0], key[1], index); | ||
739 | } | ||
740 | EXPORT_SYMBOL_GPL(visitor128); | ||
741 | |||
742 | size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, | ||
743 | unsigned long opaque, | ||
744 | void (*func)(void *elem, unsigned long opaque, | ||
745 | unsigned long *key, | ||
746 | size_t index, void *func2), | ||
747 | void *func2) | ||
748 | { | ||
749 | size_t count = 0; | ||
750 | |||
751 | if (!func2) | ||
752 | func = empty; | ||
753 | if (head->node) | ||
754 | count = __btree_for_each(head, geo, head->node, opaque, func, | ||
755 | func2, 0, head->height, 0); | ||
756 | return count; | ||
757 | } | ||
758 | EXPORT_SYMBOL_GPL(btree_visitor); | ||
759 | |||
760 | size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, | ||
761 | unsigned long opaque, | ||
762 | void (*func)(void *elem, unsigned long opaque, | ||
763 | unsigned long *key, | ||
764 | size_t index, void *func2), | ||
765 | void *func2) | ||
766 | { | ||
767 | size_t count = 0; | ||
768 | |||
769 | if (!func2) | ||
770 | func = empty; | ||
771 | if (head->node) | ||
772 | count = __btree_for_each(head, geo, head->node, opaque, func, | ||
773 | func2, 1, head->height, 0); | ||
774 | __btree_init(head); | ||
775 | return count; | ||
776 | } | ||
777 | EXPORT_SYMBOL_GPL(btree_grim_visitor); | ||
778 | |||
779 | static int __init btree_module_init(void) | ||
780 | { | ||
781 | btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0, | ||
782 | SLAB_HWCACHE_ALIGN, NULL); | ||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | static void __exit btree_module_exit(void) | ||
787 | { | ||
788 | kmem_cache_destroy(btree_cachep); | ||
789 | } | ||
790 | |||
791 | /* If core code starts using btree, initialization should happen even earlier */ | ||
792 | module_init(btree_module_init); | ||
793 | module_exit(btree_module_exit); | ||
794 | |||
795 | MODULE_AUTHOR("Joern Engel <joern@logfs.org>"); | ||
796 | MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); | ||
797 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/crc32.c b/lib/crc32.c index 02e3b31b3a79..0f45fbff34cb 100644 --- a/lib/crc32.c +++ b/lib/crc32.c | |||
@@ -30,11 +30,15 @@ | |||
30 | #include <asm/atomic.h> | 30 | #include <asm/atomic.h> |
31 | #include "crc32defs.h" | 31 | #include "crc32defs.h" |
32 | #if CRC_LE_BITS == 8 | 32 | #if CRC_LE_BITS == 8 |
33 | #define tole(x) __constant_cpu_to_le32(x) | 33 | # define tole(x) __constant_cpu_to_le32(x) |
34 | #define tobe(x) __constant_cpu_to_be32(x) | ||
35 | #else | 34 | #else |
36 | #define tole(x) (x) | 35 | # define tole(x) (x) |
37 | #define tobe(x) (x) | 36 | #endif |
37 | |||
38 | #if CRC_BE_BITS == 8 | ||
39 | # define tobe(x) __constant_cpu_to_be32(x) | ||
40 | #else | ||
41 | # define tobe(x) (x) | ||
38 | #endif | 42 | #endif |
39 | #include "crc32table.h" | 43 | #include "crc32table.h" |
40 | 44 | ||
@@ -52,20 +56,19 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) | |||
52 | # else | 56 | # else |
53 | # define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) | 57 | # define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) |
54 | # endif | 58 | # endif |
55 | const u32 *b = (const u32 *)buf; | 59 | const u32 *b; |
56 | size_t rem_len; | 60 | size_t rem_len; |
57 | 61 | ||
58 | /* Align it */ | 62 | /* Align it */ |
59 | if (unlikely((long)b & 3 && len)) { | 63 | if (unlikely((long)buf & 3 && len)) { |
60 | u8 *p = (u8 *)b; | ||
61 | do { | 64 | do { |
62 | DO_CRC(*p++); | 65 | DO_CRC(*buf++); |
63 | } while ((--len) && ((long)p)&3); | 66 | } while ((--len) && ((long)buf)&3); |
64 | b = (u32 *)p; | ||
65 | } | 67 | } |
66 | rem_len = len & 3; | 68 | rem_len = len & 3; |
67 | /* load data 32 bits wide, xor data 32 bits wide. */ | 69 | /* load data 32 bits wide, xor data 32 bits wide. */ |
68 | len = len >> 2; | 70 | len = len >> 2; |
71 | b = (const u32 *)buf; | ||
69 | for (--b; len; --len) { | 72 | for (--b; len; --len) { |
70 | crc ^= *++b; /* use pre increment for speed */ | 73 | crc ^= *++b; /* use pre increment for speed */ |
71 | DO_CRC(0); | 74 | DO_CRC(0); |
@@ -82,6 +85,7 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) | |||
82 | } while (--len); | 85 | } while (--len); |
83 | } | 86 | } |
84 | return crc; | 87 | return crc; |
88 | #undef DO_CRC | ||
85 | } | 89 | } |
86 | #endif | 90 | #endif |
87 | /** | 91 | /** |
@@ -119,9 +123,6 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) | |||
119 | crc = __cpu_to_le32(crc); | 123 | crc = __cpu_to_le32(crc); |
120 | crc = crc32_body(crc, p, len, tab); | 124 | crc = crc32_body(crc, p, len, tab); |
121 | return __le32_to_cpu(crc); | 125 | return __le32_to_cpu(crc); |
122 | #undef ENDIAN_SHIFT | ||
123 | #undef DO_CRC | ||
124 | |||
125 | # elif CRC_LE_BITS == 4 | 126 | # elif CRC_LE_BITS == 4 |
126 | while (len--) { | 127 | while (len--) { |
127 | crc ^= *p++; | 128 | crc ^= *p++; |
@@ -179,9 +180,6 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) | |||
179 | crc = __cpu_to_be32(crc); | 180 | crc = __cpu_to_be32(crc); |
180 | crc = crc32_body(crc, p, len, tab); | 181 | crc = crc32_body(crc, p, len, tab); |
181 | return __be32_to_cpu(crc); | 182 | return __be32_to_cpu(crc); |
182 | #undef ENDIAN_SHIFT | ||
183 | #undef DO_CRC | ||
184 | |||
185 | # elif CRC_BE_BITS == 4 | 183 | # elif CRC_BE_BITS == 4 |
186 | while (len--) { | 184 | while (len--) { |
187 | crc ^= *p++ << 24; | 185 | crc ^= *p++ << 24; |
diff --git a/lib/debug_locks.c b/lib/debug_locks.c index bc3b11731b9c..5bf0020b9248 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c | |||
@@ -23,6 +23,7 @@ | |||
23 | * shut up after that. | 23 | * shut up after that. |
24 | */ | 24 | */ |
25 | int debug_locks = 1; | 25 | int debug_locks = 1; |
26 | EXPORT_SYMBOL_GPL(debug_locks); | ||
26 | 27 | ||
27 | /* | 28 | /* |
28 | * The locking-testsuite uses <debug_locks_silent> to get a | 29 | * The locking-testsuite uses <debug_locks_silent> to get a |
diff --git a/lib/hweight.c b/lib/hweight.c index 389424ecb129..63ee4eb1228d 100644 --- a/lib/hweight.c +++ b/lib/hweight.c | |||
@@ -11,11 +11,18 @@ | |||
11 | 11 | ||
12 | unsigned int hweight32(unsigned int w) | 12 | unsigned int hweight32(unsigned int w) |
13 | { | 13 | { |
14 | #ifdef ARCH_HAS_FAST_MULTIPLIER | ||
15 | w -= (w >> 1) & 0x55555555; | ||
16 | w = (w & 0x33333333) + ((w >> 2) & 0x33333333); | ||
17 | w = (w + (w >> 4)) & 0x0f0f0f0f; | ||
18 | return (w * 0x01010101) >> 24; | ||
19 | #else | ||
14 | unsigned int res = w - ((w >> 1) & 0x55555555); | 20 | unsigned int res = w - ((w >> 1) & 0x55555555); |
15 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); | 21 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); |
16 | res = (res + (res >> 4)) & 0x0F0F0F0F; | 22 | res = (res + (res >> 4)) & 0x0F0F0F0F; |
17 | res = res + (res >> 8); | 23 | res = res + (res >> 8); |
18 | return (res + (res >> 16)) & 0x000000FF; | 24 | return (res + (res >> 16)) & 0x000000FF; |
25 | #endif | ||
19 | } | 26 | } |
20 | EXPORT_SYMBOL(hweight32); | 27 | EXPORT_SYMBOL(hweight32); |
21 | 28 | ||
@@ -156,10 +156,12 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |||
156 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; | 156 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; |
157 | 157 | ||
158 | /* if already at the top layer, we need to grow */ | 158 | /* if already at the top layer, we need to grow */ |
159 | if (!(p = pa[l])) { | 159 | if (id >= 1 << (idp->layers * IDR_BITS)) { |
160 | *starting_id = id; | 160 | *starting_id = id; |
161 | return IDR_NEED_TO_GROW; | 161 | return IDR_NEED_TO_GROW; |
162 | } | 162 | } |
163 | p = pa[l]; | ||
164 | BUG_ON(!p); | ||
163 | 165 | ||
164 | /* If we need to go up one layer, continue the | 166 | /* If we need to go up one layer, continue the |
165 | * loop; otherwise, restart from the top. | 167 | * loop; otherwise, restart from the top. |
@@ -502,7 +504,7 @@ void *idr_find(struct idr *idp, int id) | |||
502 | int n; | 504 | int n; |
503 | struct idr_layer *p; | 505 | struct idr_layer *p; |
504 | 506 | ||
505 | p = rcu_dereference(idp->top); | 507 | p = rcu_dereference_raw(idp->top); |
506 | if (!p) | 508 | if (!p) |
507 | return NULL; | 509 | return NULL; |
508 | n = (p->layer+1) * IDR_BITS; | 510 | n = (p->layer+1) * IDR_BITS; |
@@ -517,7 +519,7 @@ void *idr_find(struct idr *idp, int id) | |||
517 | while (n > 0 && p) { | 519 | while (n > 0 && p) { |
518 | n -= IDR_BITS; | 520 | n -= IDR_BITS; |
519 | BUG_ON(n != p->layer*IDR_BITS); | 521 | BUG_ON(n != p->layer*IDR_BITS); |
520 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); | 522 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
521 | } | 523 | } |
522 | return((void *)p); | 524 | return((void *)p); |
523 | } | 525 | } |
@@ -550,7 +552,7 @@ int idr_for_each(struct idr *idp, | |||
550 | struct idr_layer **paa = &pa[0]; | 552 | struct idr_layer **paa = &pa[0]; |
551 | 553 | ||
552 | n = idp->layers * IDR_BITS; | 554 | n = idp->layers * IDR_BITS; |
553 | p = rcu_dereference(idp->top); | 555 | p = rcu_dereference_raw(idp->top); |
554 | max = 1 << n; | 556 | max = 1 << n; |
555 | 557 | ||
556 | id = 0; | 558 | id = 0; |
@@ -558,7 +560,7 @@ int idr_for_each(struct idr *idp, | |||
558 | while (n > 0 && p) { | 560 | while (n > 0 && p) { |
559 | n -= IDR_BITS; | 561 | n -= IDR_BITS; |
560 | *paa++ = p; | 562 | *paa++ = p; |
561 | p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); | 563 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
562 | } | 564 | } |
563 | 565 | ||
564 | if (p) { | 566 | if (p) { |
diff --git a/lib/kobject.c b/lib/kobject.c index b512b746d2af..8115eb1bbf4d 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr, | |||
700 | return ret; | 700 | return ret; |
701 | } | 701 | } |
702 | 702 | ||
703 | struct sysfs_ops kobj_sysfs_ops = { | 703 | const struct sysfs_ops kobj_sysfs_ops = { |
704 | .show = kobj_attr_show, | 704 | .show = kobj_attr_show, |
705 | .store = kobj_attr_store, | 705 | .store = kobj_attr_store, |
706 | }; | 706 | }; |
@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = { | |||
789 | * If the kset was not able to be created, NULL will be returned. | 789 | * If the kset was not able to be created, NULL will be returned. |
790 | */ | 790 | */ |
791 | static struct kset *kset_create(const char *name, | 791 | static struct kset *kset_create(const char *name, |
792 | struct kset_uevent_ops *uevent_ops, | 792 | const struct kset_uevent_ops *uevent_ops, |
793 | struct kobject *parent_kobj) | 793 | struct kobject *parent_kobj) |
794 | { | 794 | { |
795 | struct kset *kset; | 795 | struct kset *kset; |
@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name, | |||
832 | * If the kset was not able to be created, NULL will be returned. | 832 | * If the kset was not able to be created, NULL will be returned. |
833 | */ | 833 | */ |
834 | struct kset *kset_create_and_add(const char *name, | 834 | struct kset *kset_create_and_add(const char *name, |
835 | struct kset_uevent_ops *uevent_ops, | 835 | const struct kset_uevent_ops *uevent_ops, |
836 | struct kobject *parent_kobj) | 836 | struct kobject *parent_kobj) |
837 | { | 837 | { |
838 | struct kset *kset; | 838 | struct kset *kset; |
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 920a3ca6e259..c9d3a3e8405d 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
95 | const char *subsystem; | 95 | const char *subsystem; |
96 | struct kobject *top_kobj; | 96 | struct kobject *top_kobj; |
97 | struct kset *kset; | 97 | struct kset *kset; |
98 | struct kset_uevent_ops *uevent_ops; | 98 | const struct kset_uevent_ops *uevent_ops; |
99 | u64 seq; | 99 | u64 seq; |
100 | int i = 0; | 100 | int i = 0; |
101 | int retval = 0; | 101 | int retval = 0; |
diff --git a/lib/list_sort.c b/lib/list_sort.c index 19d11e0bb958..4b5cb794c38b 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c | |||
@@ -4,99 +4,214 @@ | |||
4 | #include <linux/slab.h> | 4 | #include <linux/slab.h> |
5 | #include <linux/list.h> | 5 | #include <linux/list.h> |
6 | 6 | ||
7 | #define MAX_LIST_LENGTH_BITS 20 | ||
8 | |||
9 | /* | ||
10 | * Returns a list organized in an intermediate format suited | ||
11 | * to chaining of merge() calls: null-terminated, no reserved or | ||
12 | * sentinel head node, "prev" links not maintained. | ||
13 | */ | ||
14 | static struct list_head *merge(void *priv, | ||
15 | int (*cmp)(void *priv, struct list_head *a, | ||
16 | struct list_head *b), | ||
17 | struct list_head *a, struct list_head *b) | ||
18 | { | ||
19 | struct list_head head, *tail = &head; | ||
20 | |||
21 | while (a && b) { | ||
22 | /* if equal, take 'a' -- important for sort stability */ | ||
23 | if ((*cmp)(priv, a, b) <= 0) { | ||
24 | tail->next = a; | ||
25 | a = a->next; | ||
26 | } else { | ||
27 | tail->next = b; | ||
28 | b = b->next; | ||
29 | } | ||
30 | tail = tail->next; | ||
31 | } | ||
32 | tail->next = a?:b; | ||
33 | return head.next; | ||
34 | } | ||
35 | |||
36 | /* | ||
37 | * Combine final list merge with restoration of standard doubly-linked | ||
38 | * list structure. This approach duplicates code from merge(), but | ||
39 | * runs faster than the tidier alternatives of either a separate final | ||
40 | * prev-link restoration pass, or maintaining the prev links | ||
41 | * throughout. | ||
42 | */ | ||
43 | static void merge_and_restore_back_links(void *priv, | ||
44 | int (*cmp)(void *priv, struct list_head *a, | ||
45 | struct list_head *b), | ||
46 | struct list_head *head, | ||
47 | struct list_head *a, struct list_head *b) | ||
48 | { | ||
49 | struct list_head *tail = head; | ||
50 | |||
51 | while (a && b) { | ||
52 | /* if equal, take 'a' -- important for sort stability */ | ||
53 | if ((*cmp)(priv, a, b) <= 0) { | ||
54 | tail->next = a; | ||
55 | a->prev = tail; | ||
56 | a = a->next; | ||
57 | } else { | ||
58 | tail->next = b; | ||
59 | b->prev = tail; | ||
60 | b = b->next; | ||
61 | } | ||
62 | tail = tail->next; | ||
63 | } | ||
64 | tail->next = a ? : b; | ||
65 | |||
66 | do { | ||
67 | /* | ||
68 | * In worst cases this loop may run many iterations. | ||
69 | * Continue callbacks to the client even though no | ||
70 | * element comparison is needed, so the client's cmp() | ||
71 | * routine can invoke cond_resched() periodically. | ||
72 | */ | ||
73 | (*cmp)(priv, tail, tail); | ||
74 | |||
75 | tail->next->prev = tail; | ||
76 | tail = tail->next; | ||
77 | } while (tail->next); | ||
78 | |||
79 | tail->next = head; | ||
80 | head->prev = tail; | ||
81 | } | ||
82 | |||
7 | /** | 83 | /** |
8 | * list_sort - sort a list. | 84 | * list_sort - sort a list |
9 | * @priv: private data, passed to @cmp | 85 | * @priv: private data, opaque to list_sort(), passed to @cmp |
10 | * @head: the list to sort | 86 | * @head: the list to sort |
11 | * @cmp: the elements comparison function | 87 | * @cmp: the elements comparison function |
12 | * | 88 | * |
13 | * This function has been implemented by Mark J Roberts <mjr@znex.org>. It | 89 | * This function implements "merge sort", which has O(nlog(n)) |
14 | * implements "merge sort" which has O(nlog(n)) complexity. The list is sorted | 90 | * complexity. |
15 | * in ascending order. | ||
16 | * | 91 | * |
17 | * The comparison function @cmp is supposed to return a negative value if @a is | 92 | * The comparison function @cmp must return a negative value if @a |
18 | * less than @b, and a positive value if @a is greater than @b. If @a and @b | 93 | * should sort before @b, and a positive value if @a should sort after |
19 | * are equivalent, then it does not matter what this function returns. | 94 | * @b. If @a and @b are equivalent, and their original relative |
95 | * ordering is to be preserved, @cmp must return 0. | ||
20 | */ | 96 | */ |
21 | void list_sort(void *priv, struct list_head *head, | 97 | void list_sort(void *priv, struct list_head *head, |
22 | int (*cmp)(void *priv, struct list_head *a, | 98 | int (*cmp)(void *priv, struct list_head *a, |
23 | struct list_head *b)) | 99 | struct list_head *b)) |
24 | { | 100 | { |
25 | struct list_head *p, *q, *e, *list, *tail, *oldhead; | 101 | struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists |
26 | int insize, nmerges, psize, qsize, i; | 102 | -- last slot is a sentinel */ |
103 | int lev; /* index into part[] */ | ||
104 | int max_lev = 0; | ||
105 | struct list_head *list; | ||
27 | 106 | ||
28 | if (list_empty(head)) | 107 | if (list_empty(head)) |
29 | return; | 108 | return; |
30 | 109 | ||
110 | memset(part, 0, sizeof(part)); | ||
111 | |||
112 | head->prev->next = NULL; | ||
31 | list = head->next; | 113 | list = head->next; |
32 | list_del(head); | ||
33 | insize = 1; | ||
34 | for (;;) { | ||
35 | p = oldhead = list; | ||
36 | list = tail = NULL; | ||
37 | nmerges = 0; | ||
38 | |||
39 | while (p) { | ||
40 | nmerges++; | ||
41 | q = p; | ||
42 | psize = 0; | ||
43 | for (i = 0; i < insize; i++) { | ||
44 | psize++; | ||
45 | q = q->next == oldhead ? NULL : q->next; | ||
46 | if (!q) | ||
47 | break; | ||
48 | } | ||
49 | 114 | ||
50 | qsize = insize; | 115 | while (list) { |
51 | while (psize > 0 || (qsize > 0 && q)) { | 116 | struct list_head *cur = list; |
52 | if (!psize) { | 117 | list = list->next; |
53 | e = q; | 118 | cur->next = NULL; |
54 | q = q->next; | 119 | |
55 | qsize--; | 120 | for (lev = 0; part[lev]; lev++) { |
56 | if (q == oldhead) | 121 | cur = merge(priv, cmp, part[lev], cur); |
57 | q = NULL; | 122 | part[lev] = NULL; |
58 | } else if (!qsize || !q) { | 123 | } |
59 | e = p; | 124 | if (lev > max_lev) { |
60 | p = p->next; | 125 | if (unlikely(lev >= ARRAY_SIZE(part)-1)) { |
61 | psize--; | 126 | printk_once(KERN_DEBUG "list passed to" |
62 | if (p == oldhead) | 127 | " list_sort() too long for" |
63 | p = NULL; | 128 | " efficiency\n"); |
64 | } else if (cmp(priv, p, q) <= 0) { | 129 | lev--; |
65 | e = p; | ||
66 | p = p->next; | ||
67 | psize--; | ||
68 | if (p == oldhead) | ||
69 | p = NULL; | ||
70 | } else { | ||
71 | e = q; | ||
72 | q = q->next; | ||
73 | qsize--; | ||
74 | if (q == oldhead) | ||
75 | q = NULL; | ||
76 | } | ||
77 | if (tail) | ||
78 | tail->next = e; | ||
79 | else | ||
80 | list = e; | ||
81 | e->prev = tail; | ||
82 | tail = e; | ||
83 | } | 130 | } |
84 | p = q; | 131 | max_lev = lev; |
85 | } | 132 | } |
133 | part[lev] = cur; | ||
134 | } | ||
86 | 135 | ||
87 | tail->next = list; | 136 | for (lev = 0; lev < max_lev; lev++) |
88 | list->prev = tail; | 137 | if (part[lev]) |
138 | list = merge(priv, cmp, part[lev], list); | ||
89 | 139 | ||
90 | if (nmerges <= 1) | 140 | merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); |
91 | break; | 141 | } |
142 | EXPORT_SYMBOL(list_sort); | ||
92 | 143 | ||
93 | insize *= 2; | 144 | #ifdef DEBUG_LIST_SORT |
94 | } | 145 | struct debug_el { |
146 | struct list_head l_h; | ||
147 | int value; | ||
148 | unsigned serial; | ||
149 | }; | ||
95 | 150 | ||
96 | head->next = list; | 151 | static int cmp(void *priv, struct list_head *a, struct list_head *b) |
97 | head->prev = list->prev; | 152 | { |
98 | list->prev->next = head; | 153 | return container_of(a, struct debug_el, l_h)->value |
99 | list->prev = head; | 154 | - container_of(b, struct debug_el, l_h)->value; |
100 | } | 155 | } |
101 | 156 | ||
102 | EXPORT_SYMBOL(list_sort); | 157 | /* |
158 | * The pattern of set bits in the list length determines which cases | ||
159 | * are hit in list_sort(). | ||
160 | */ | ||
161 | #define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */ | ||
162 | |||
163 | static int __init list_sort_test(void) | ||
164 | { | ||
165 | int i, r = 1, count; | ||
166 | struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL); | ||
167 | struct list_head *cur; | ||
168 | |||
169 | printk(KERN_WARNING "testing list_sort()\n"); | ||
170 | |||
171 | cur = head; | ||
172 | for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) { | ||
173 | struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL); | ||
174 | BUG_ON(!el); | ||
175 | /* force some equivalencies */ | ||
176 | el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3); | ||
177 | el->serial = i; | ||
178 | |||
179 | el->l_h.prev = cur; | ||
180 | cur->next = &el->l_h; | ||
181 | cur = cur->next; | ||
182 | } | ||
183 | head->prev = cur; | ||
184 | |||
185 | list_sort(NULL, head, cmp); | ||
186 | |||
187 | count = 1; | ||
188 | for (cur = head->next; cur->next != head; cur = cur->next) { | ||
189 | struct debug_el *el = container_of(cur, struct debug_el, l_h); | ||
190 | int cmp_result = cmp(NULL, cur, cur->next); | ||
191 | if (cur->next->prev != cur) { | ||
192 | printk(KERN_EMERG "list_sort() returned " | ||
193 | "a corrupted list!\n"); | ||
194 | return 1; | ||
195 | } else if (cmp_result > 0) { | ||
196 | printk(KERN_EMERG "list_sort() failed to sort!\n"); | ||
197 | return 1; | ||
198 | } else if (cmp_result == 0 && | ||
199 | el->serial >= container_of(cur->next, | ||
200 | struct debug_el, l_h)->serial) { | ||
201 | printk(KERN_EMERG "list_sort() failed to preserve order" | ||
202 | " of equivalent elements!\n"); | ||
203 | return 1; | ||
204 | } | ||
205 | kfree(cur->prev); | ||
206 | count++; | ||
207 | } | ||
208 | kfree(cur); | ||
209 | if (count != LIST_SORT_TEST_LENGTH) { | ||
210 | printk(KERN_EMERG "list_sort() returned list of" | ||
211 | "different length!\n"); | ||
212 | return 1; | ||
213 | } | ||
214 | return 0; | ||
215 | } | ||
216 | module_init(list_sort_test); | ||
217 | #endif | ||
@@ -205,9 +205,8 @@ long lmb_add(u64 base, u64 size) | |||
205 | 205 | ||
206 | } | 206 | } |
207 | 207 | ||
208 | long lmb_remove(u64 base, u64 size) | 208 | static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size) |
209 | { | 209 | { |
210 | struct lmb_region *rgn = &(lmb.memory); | ||
211 | u64 rgnbegin, rgnend; | 210 | u64 rgnbegin, rgnend; |
212 | u64 end = base + size; | 211 | u64 end = base + size; |
213 | int i; | 212 | int i; |
@@ -254,6 +253,16 @@ long lmb_remove(u64 base, u64 size) | |||
254 | return lmb_add_region(rgn, end, rgnend - end); | 253 | return lmb_add_region(rgn, end, rgnend - end); |
255 | } | 254 | } |
256 | 255 | ||
256 | long lmb_remove(u64 base, u64 size) | ||
257 | { | ||
258 | return __lmb_remove(&lmb.memory, base, size); | ||
259 | } | ||
260 | |||
261 | long __init lmb_free(u64 base, u64 size) | ||
262 | { | ||
263 | return __lmb_remove(&lmb.reserved, base, size); | ||
264 | } | ||
265 | |||
257 | long __init lmb_reserve(u64 base, u64 size) | 266 | long __init lmb_reserve(u64 base, u64 size) |
258 | { | 267 | { |
259 | struct lmb_region *_rgn = &lmb.reserved; | 268 | struct lmb_region *_rgn = &lmb.reserved; |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 92cdd9936e3d..6b9670d6bbf9 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
@@ -364,7 +364,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
364 | unsigned int height, shift; | 364 | unsigned int height, shift; |
365 | struct radix_tree_node *node, **slot; | 365 | struct radix_tree_node *node, **slot; |
366 | 366 | ||
367 | node = rcu_dereference(root->rnode); | 367 | node = rcu_dereference_raw(root->rnode); |
368 | if (node == NULL) | 368 | if (node == NULL) |
369 | return NULL; | 369 | return NULL; |
370 | 370 | ||
@@ -384,7 +384,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, | |||
384 | do { | 384 | do { |
385 | slot = (struct radix_tree_node **) | 385 | slot = (struct radix_tree_node **) |
386 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); | 386 | (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); |
387 | node = rcu_dereference(*slot); | 387 | node = rcu_dereference_raw(*slot); |
388 | if (node == NULL) | 388 | if (node == NULL) |
389 | return NULL; | 389 | return NULL; |
390 | 390 | ||
@@ -568,7 +568,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
568 | if (!root_tag_get(root, tag)) | 568 | if (!root_tag_get(root, tag)) |
569 | return 0; | 569 | return 0; |
570 | 570 | ||
571 | node = rcu_dereference(root->rnode); | 571 | node = rcu_dereference_raw(root->rnode); |
572 | if (node == NULL) | 572 | if (node == NULL) |
573 | return 0; | 573 | return 0; |
574 | 574 | ||
@@ -602,7 +602,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, | |||
602 | BUG_ON(ret && saw_unset_tag); | 602 | BUG_ON(ret && saw_unset_tag); |
603 | return !!ret; | 603 | return !!ret; |
604 | } | 604 | } |
605 | node = rcu_dereference(node->slots[offset]); | 605 | node = rcu_dereference_raw(node->slots[offset]); |
606 | shift -= RADIX_TREE_MAP_SHIFT; | 606 | shift -= RADIX_TREE_MAP_SHIFT; |
607 | height--; | 607 | height--; |
608 | } | 608 | } |
@@ -711,7 +711,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, | |||
711 | } | 711 | } |
712 | 712 | ||
713 | shift -= RADIX_TREE_MAP_SHIFT; | 713 | shift -= RADIX_TREE_MAP_SHIFT; |
714 | slot = rcu_dereference(slot->slots[i]); | 714 | slot = rcu_dereference_raw(slot->slots[i]); |
715 | if (slot == NULL) | 715 | if (slot == NULL) |
716 | goto out; | 716 | goto out; |
717 | } | 717 | } |
@@ -758,7 +758,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
758 | unsigned long cur_index = first_index; | 758 | unsigned long cur_index = first_index; |
759 | unsigned int ret; | 759 | unsigned int ret; |
760 | 760 | ||
761 | node = rcu_dereference(root->rnode); | 761 | node = rcu_dereference_raw(root->rnode); |
762 | if (!node) | 762 | if (!node) |
763 | return 0; | 763 | return 0; |
764 | 764 | ||
@@ -787,7 +787,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, | |||
787 | slot = *(((void ***)results)[ret + i]); | 787 | slot = *(((void ***)results)[ret + i]); |
788 | if (!slot) | 788 | if (!slot) |
789 | continue; | 789 | continue; |
790 | results[ret + nr_found] = rcu_dereference(slot); | 790 | results[ret + nr_found] = rcu_dereference_raw(slot); |
791 | nr_found++; | 791 | nr_found++; |
792 | } | 792 | } |
793 | ret += nr_found; | 793 | ret += nr_found; |
@@ -826,7 +826,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, | |||
826 | unsigned long cur_index = first_index; | 826 | unsigned long cur_index = first_index; |
827 | unsigned int ret; | 827 | unsigned int ret; |
828 | 828 | ||
829 | node = rcu_dereference(root->rnode); | 829 | node = rcu_dereference_raw(root->rnode); |
830 | if (!node) | 830 | if (!node) |
831 | return 0; | 831 | return 0; |
832 | 832 | ||
@@ -915,7 +915,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, | |||
915 | } | 915 | } |
916 | } | 916 | } |
917 | shift -= RADIX_TREE_MAP_SHIFT; | 917 | shift -= RADIX_TREE_MAP_SHIFT; |
918 | slot = rcu_dereference(slot->slots[i]); | 918 | slot = rcu_dereference_raw(slot->slots[i]); |
919 | if (slot == NULL) | 919 | if (slot == NULL) |
920 | break; | 920 | break; |
921 | } | 921 | } |
@@ -951,7 +951,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
951 | if (!root_tag_get(root, tag)) | 951 | if (!root_tag_get(root, tag)) |
952 | return 0; | 952 | return 0; |
953 | 953 | ||
954 | node = rcu_dereference(root->rnode); | 954 | node = rcu_dereference_raw(root->rnode); |
955 | if (!node) | 955 | if (!node) |
956 | return 0; | 956 | return 0; |
957 | 957 | ||
@@ -980,7 +980,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, | |||
980 | slot = *(((void ***)results)[ret + i]); | 980 | slot = *(((void ***)results)[ret + i]); |
981 | if (!slot) | 981 | if (!slot) |
982 | continue; | 982 | continue; |
983 | results[ret + nr_found] = rcu_dereference(slot); | 983 | results[ret + nr_found] = rcu_dereference_raw(slot); |
984 | nr_found++; | 984 | nr_found++; |
985 | } | 985 | } |
986 | ret += nr_found; | 986 | ret += nr_found; |
@@ -1020,7 +1020,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | |||
1020 | if (!root_tag_get(root, tag)) | 1020 | if (!root_tag_get(root, tag)) |
1021 | return 0; | 1021 | return 0; |
1022 | 1022 | ||
1023 | node = rcu_dereference(root->rnode); | 1023 | node = rcu_dereference_raw(root->rnode); |
1024 | if (!node) | 1024 | if (!node) |
1025 | return 0; | 1025 | return 0; |
1026 | 1026 | ||
diff --git a/lib/show_mem.c b/lib/show_mem.c index 238e72a18ce1..fdc77c82f922 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
@@ -15,7 +15,7 @@ void show_mem(void) | |||
15 | unsigned long total = 0, reserved = 0, shared = 0, | 15 | unsigned long total = 0, reserved = 0, shared = 0, |
16 | nonshared = 0, highmem = 0; | 16 | nonshared = 0, highmem = 0; |
17 | 17 | ||
18 | printk(KERN_INFO "Mem-Info:\n"); | 18 | printk("Mem-Info:\n"); |
19 | show_free_areas(); | 19 | show_free_areas(); |
20 | 20 | ||
21 | for_each_online_pgdat(pgdat) { | 21 | for_each_online_pgdat(pgdat) { |
@@ -49,15 +49,15 @@ void show_mem(void) | |||
49 | pgdat_resize_unlock(pgdat, &flags); | 49 | pgdat_resize_unlock(pgdat, &flags); |
50 | } | 50 | } |
51 | 51 | ||
52 | printk(KERN_INFO "%lu pages RAM\n", total); | 52 | printk("%lu pages RAM\n", total); |
53 | #ifdef CONFIG_HIGHMEM | 53 | #ifdef CONFIG_HIGHMEM |
54 | printk(KERN_INFO "%lu pages HighMem\n", highmem); | 54 | printk("%lu pages HighMem\n", highmem); |
55 | #endif | 55 | #endif |
56 | printk(KERN_INFO "%lu pages reserved\n", reserved); | 56 | printk("%lu pages reserved\n", reserved); |
57 | printk(KERN_INFO "%lu pages shared\n", shared); | 57 | printk("%lu pages shared\n", shared); |
58 | printk(KERN_INFO "%lu pages non-shared\n", nonshared); | 58 | printk("%lu pages non-shared\n", nonshared); |
59 | #ifdef CONFIG_QUICKLIST | 59 | #ifdef CONFIG_QUICKLIST |
60 | printk(KERN_INFO "%lu pages in pagetable cache\n", | 60 | printk("%lu pages in pagetable cache\n", |
61 | quicklist_total_size()); | 61 | quicklist_total_size()); |
62 | #endif | 62 | #endif |
63 | } | 63 | } |
diff --git a/lib/string.c b/lib/string.c index a1cdcfcc42d0..f71bead1be3e 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len) | |||
36 | /* Yes, Virginia, it had better be unsigned */ | 36 | /* Yes, Virginia, it had better be unsigned */ |
37 | unsigned char c1, c2; | 37 | unsigned char c1, c2; |
38 | 38 | ||
39 | c1 = c2 = 0; | 39 | if (!len) |
40 | if (len) { | 40 | return 0; |
41 | do { | 41 | |
42 | c1 = *s1; | 42 | do { |
43 | c2 = *s2; | 43 | c1 = *s1++; |
44 | s1++; | 44 | c2 = *s2++; |
45 | s2++; | 45 | if (!c1 || !c2) |
46 | if (!c1) | 46 | break; |
47 | break; | 47 | if (c1 == c2) |
48 | if (!c2) | 48 | continue; |
49 | break; | 49 | c1 = tolower(c1); |
50 | if (c1 == c2) | 50 | c2 = tolower(c2); |
51 | continue; | 51 | if (c1 != c2) |
52 | c1 = tolower(c1); | 52 | break; |
53 | c2 = tolower(c2); | 53 | } while (--len); |
54 | if (c1 != c2) | ||
55 | break; | ||
56 | } while (--len); | ||
57 | } | ||
58 | return (int)c1 - (int)c2; | 54 | return (int)c1 - (int)c2; |
59 | } | 55 | } |
60 | EXPORT_SYMBOL(strnicmp); | 56 | EXPORT_SYMBOL(strnicmp); |
@@ -693,13 +689,13 @@ EXPORT_SYMBOL(strstr); | |||
693 | */ | 689 | */ |
694 | char *strnstr(const char *s1, const char *s2, size_t len) | 690 | char *strnstr(const char *s1, const char *s2, size_t len) |
695 | { | 691 | { |
696 | size_t l1 = len, l2; | 692 | size_t l2; |
697 | 693 | ||
698 | l2 = strlen(s2); | 694 | l2 = strlen(s2); |
699 | if (!l2) | 695 | if (!l2) |
700 | return (char *)s1; | 696 | return (char *)s1; |
701 | while (l1 >= l2) { | 697 | while (len >= l2) { |
702 | l1--; | 698 | len--; |
703 | if (!memcmp(s1, s2, l2)) | 699 | if (!memcmp(s1, s2, l2)) |
704 | return (char *)s1; | 700 | return (char *)s1; |
705 | s1++; | 701 | s1++; |
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 3b8aeec4e327..24112e5a5780 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
@@ -381,8 +381,8 @@ static noinline char *put_dec(char *buf, unsigned long long num) | |||
381 | #define PLUS 4 /* show plus */ | 381 | #define PLUS 4 /* show plus */ |
382 | #define SPACE 8 /* space if plus */ | 382 | #define SPACE 8 /* space if plus */ |
383 | #define LEFT 16 /* left justified */ | 383 | #define LEFT 16 /* left justified */ |
384 | #define SMALL 32 /* Must be 32 == 0x20 */ | 384 | #define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */ |
385 | #define SPECIAL 64 /* 0x */ | 385 | #define SPECIAL 64 /* prefix hex with "0x", octal with "0" */ |
386 | 386 | ||
387 | enum format_type { | 387 | enum format_type { |
388 | FORMAT_TYPE_NONE, /* Just a string part */ | 388 | FORMAT_TYPE_NONE, /* Just a string part */ |
@@ -408,12 +408,12 @@ enum format_type { | |||
408 | }; | 408 | }; |
409 | 409 | ||
410 | struct printf_spec { | 410 | struct printf_spec { |
411 | enum format_type type; | 411 | u16 type; |
412 | int flags; /* flags to number() */ | 412 | s16 field_width; /* width of output field */ |
413 | int field_width; /* width of output field */ | 413 | u8 flags; /* flags to number() */ |
414 | int base; | 414 | u8 base; |
415 | int precision; /* # of digits/chars */ | 415 | s8 precision; /* # of digits/chars */ |
416 | int qualifier; | 416 | u8 qualifier; |
417 | }; | 417 | }; |
418 | 418 | ||
419 | static char *number(char *buf, char *end, unsigned long long num, | 419 | static char *number(char *buf, char *end, unsigned long long num, |
@@ -597,22 +597,35 @@ static char *resource_string(char *buf, char *end, struct resource *res, | |||
597 | #ifndef MEM_RSRC_PRINTK_SIZE | 597 | #ifndef MEM_RSRC_PRINTK_SIZE |
598 | #define MEM_RSRC_PRINTK_SIZE 10 | 598 | #define MEM_RSRC_PRINTK_SIZE 10 |
599 | #endif | 599 | #endif |
600 | struct printf_spec hex_spec = { | 600 | static const struct printf_spec io_spec = { |
601 | .base = 16, | 601 | .base = 16, |
602 | .field_width = IO_RSRC_PRINTK_SIZE, | ||
602 | .precision = -1, | 603 | .precision = -1, |
603 | .flags = SPECIAL | SMALL | ZEROPAD, | 604 | .flags = SPECIAL | SMALL | ZEROPAD, |
604 | }; | 605 | }; |
605 | struct printf_spec dec_spec = { | 606 | static const struct printf_spec mem_spec = { |
607 | .base = 16, | ||
608 | .field_width = MEM_RSRC_PRINTK_SIZE, | ||
609 | .precision = -1, | ||
610 | .flags = SPECIAL | SMALL | ZEROPAD, | ||
611 | }; | ||
612 | static const struct printf_spec bus_spec = { | ||
613 | .base = 16, | ||
614 | .field_width = 2, | ||
615 | .precision = -1, | ||
616 | .flags = SMALL | ZEROPAD, | ||
617 | }; | ||
618 | static const struct printf_spec dec_spec = { | ||
606 | .base = 10, | 619 | .base = 10, |
607 | .precision = -1, | 620 | .precision = -1, |
608 | .flags = 0, | 621 | .flags = 0, |
609 | }; | 622 | }; |
610 | struct printf_spec str_spec = { | 623 | static const struct printf_spec str_spec = { |
611 | .field_width = -1, | 624 | .field_width = -1, |
612 | .precision = 10, | 625 | .precision = 10, |
613 | .flags = LEFT, | 626 | .flags = LEFT, |
614 | }; | 627 | }; |
615 | struct printf_spec flag_spec = { | 628 | static const struct printf_spec flag_spec = { |
616 | .base = 16, | 629 | .base = 16, |
617 | .precision = -1, | 630 | .precision = -1, |
618 | .flags = SPECIAL | SMALL, | 631 | .flags = SPECIAL | SMALL, |
@@ -622,47 +635,48 @@ static char *resource_string(char *buf, char *end, struct resource *res, | |||
622 | * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ | 635 | * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ |
623 | #define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) | 636 | #define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) |
624 | #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) | 637 | #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) |
625 | #define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]") | 638 | #define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]") |
626 | #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") | 639 | #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") |
627 | char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, | 640 | char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, |
628 | 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; | 641 | 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; |
629 | 642 | ||
630 | char *p = sym, *pend = sym + sizeof(sym); | 643 | char *p = sym, *pend = sym + sizeof(sym); |
631 | int size = -1, addr = 0; | ||
632 | int decode = (fmt[0] == 'R') ? 1 : 0; | 644 | int decode = (fmt[0] == 'R') ? 1 : 0; |
633 | 645 | const struct printf_spec *specp; | |
634 | if (res->flags & IORESOURCE_IO) { | ||
635 | size = IO_RSRC_PRINTK_SIZE; | ||
636 | addr = 1; | ||
637 | } else if (res->flags & IORESOURCE_MEM) { | ||
638 | size = MEM_RSRC_PRINTK_SIZE; | ||
639 | addr = 1; | ||
640 | } | ||
641 | 646 | ||
642 | *p++ = '['; | 647 | *p++ = '['; |
643 | if (res->flags & IORESOURCE_IO) | 648 | if (res->flags & IORESOURCE_IO) { |
644 | p = string(p, pend, "io ", str_spec); | 649 | p = string(p, pend, "io ", str_spec); |
645 | else if (res->flags & IORESOURCE_MEM) | 650 | specp = &io_spec; |
651 | } else if (res->flags & IORESOURCE_MEM) { | ||
646 | p = string(p, pend, "mem ", str_spec); | 652 | p = string(p, pend, "mem ", str_spec); |
647 | else if (res->flags & IORESOURCE_IRQ) | 653 | specp = &mem_spec; |
654 | } else if (res->flags & IORESOURCE_IRQ) { | ||
648 | p = string(p, pend, "irq ", str_spec); | 655 | p = string(p, pend, "irq ", str_spec); |
649 | else if (res->flags & IORESOURCE_DMA) | 656 | specp = &dec_spec; |
657 | } else if (res->flags & IORESOURCE_DMA) { | ||
650 | p = string(p, pend, "dma ", str_spec); | 658 | p = string(p, pend, "dma ", str_spec); |
651 | else { | 659 | specp = &dec_spec; |
660 | } else if (res->flags & IORESOURCE_BUS) { | ||
661 | p = string(p, pend, "bus ", str_spec); | ||
662 | specp = &bus_spec; | ||
663 | } else { | ||
652 | p = string(p, pend, "??? ", str_spec); | 664 | p = string(p, pend, "??? ", str_spec); |
665 | specp = &mem_spec; | ||
653 | decode = 0; | 666 | decode = 0; |
654 | } | 667 | } |
655 | hex_spec.field_width = size; | 668 | p = number(p, pend, res->start, *specp); |
656 | p = number(p, pend, res->start, addr ? hex_spec : dec_spec); | ||
657 | if (res->start != res->end) { | 669 | if (res->start != res->end) { |
658 | *p++ = '-'; | 670 | *p++ = '-'; |
659 | p = number(p, pend, res->end, addr ? hex_spec : dec_spec); | 671 | p = number(p, pend, res->end, *specp); |
660 | } | 672 | } |
661 | if (decode) { | 673 | if (decode) { |
662 | if (res->flags & IORESOURCE_MEM_64) | 674 | if (res->flags & IORESOURCE_MEM_64) |
663 | p = string(p, pend, " 64bit", str_spec); | 675 | p = string(p, pend, " 64bit", str_spec); |
664 | if (res->flags & IORESOURCE_PREFETCH) | 676 | if (res->flags & IORESOURCE_PREFETCH) |
665 | p = string(p, pend, " pref", str_spec); | 677 | p = string(p, pend, " pref", str_spec); |
678 | if (res->flags & IORESOURCE_WINDOW) | ||
679 | p = string(p, pend, " window", str_spec); | ||
666 | if (res->flags & IORESOURCE_DISABLED) | 680 | if (res->flags & IORESOURCE_DISABLED) |
667 | p = string(p, pend, " disabled", str_spec); | 681 | p = string(p, pend, " disabled", str_spec); |
668 | } else { | 682 | } else { |
@@ -681,24 +695,55 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, | |||
681 | char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; | 695 | char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; |
682 | char *p = mac_addr; | 696 | char *p = mac_addr; |
683 | int i; | 697 | int i; |
698 | char separator; | ||
699 | |||
700 | if (fmt[1] == 'F') { /* FDDI canonical format */ | ||
701 | separator = '-'; | ||
702 | } else { | ||
703 | separator = ':'; | ||
704 | } | ||
684 | 705 | ||
685 | for (i = 0; i < 6; i++) { | 706 | for (i = 0; i < 6; i++) { |
686 | p = pack_hex_byte(p, addr[i]); | 707 | p = pack_hex_byte(p, addr[i]); |
687 | if (fmt[0] == 'M' && i != 5) | 708 | if (fmt[0] == 'M' && i != 5) |
688 | *p++ = ':'; | 709 | *p++ = separator; |
689 | } | 710 | } |
690 | *p = '\0'; | 711 | *p = '\0'; |
691 | 712 | ||
692 | return string(buf, end, mac_addr, spec); | 713 | return string(buf, end, mac_addr, spec); |
693 | } | 714 | } |
694 | 715 | ||
695 | static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) | 716 | static char *ip4_string(char *p, const u8 *addr, const char *fmt) |
696 | { | 717 | { |
697 | int i; | 718 | int i; |
698 | 719 | bool leading_zeros = (fmt[0] == 'i'); | |
720 | int index; | ||
721 | int step; | ||
722 | |||
723 | switch (fmt[2]) { | ||
724 | case 'h': | ||
725 | #ifdef __BIG_ENDIAN | ||
726 | index = 0; | ||
727 | step = 1; | ||
728 | #else | ||
729 | index = 3; | ||
730 | step = -1; | ||
731 | #endif | ||
732 | break; | ||
733 | case 'l': | ||
734 | index = 3; | ||
735 | step = -1; | ||
736 | break; | ||
737 | case 'n': | ||
738 | case 'b': | ||
739 | default: | ||
740 | index = 0; | ||
741 | step = 1; | ||
742 | break; | ||
743 | } | ||
699 | for (i = 0; i < 4; i++) { | 744 | for (i = 0; i < 4; i++) { |
700 | char temp[3]; /* hold each IP quad in reverse order */ | 745 | char temp[3]; /* hold each IP quad in reverse order */ |
701 | int digits = put_dec_trunc(temp, addr[i]) - temp; | 746 | int digits = put_dec_trunc(temp, addr[index]) - temp; |
702 | if (leading_zeros) { | 747 | if (leading_zeros) { |
703 | if (digits < 3) | 748 | if (digits < 3) |
704 | *p++ = '0'; | 749 | *p++ = '0'; |
@@ -710,6 +755,7 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) | |||
710 | *p++ = temp[digits]; | 755 | *p++ = temp[digits]; |
711 | if (i < 3) | 756 | if (i < 3) |
712 | *p++ = '.'; | 757 | *p++ = '.'; |
758 | index += step; | ||
713 | } | 759 | } |
714 | *p = '\0'; | 760 | *p = '\0'; |
715 | 761 | ||
@@ -789,7 +835,7 @@ static char *ip6_compressed_string(char *p, const char *addr) | |||
789 | if (useIPv4) { | 835 | if (useIPv4) { |
790 | if (needcolon) | 836 | if (needcolon) |
791 | *p++ = ':'; | 837 | *p++ = ':'; |
792 | p = ip4_string(p, &in6.s6_addr[12], false); | 838 | p = ip4_string(p, &in6.s6_addr[12], "I4"); |
793 | } | 839 | } |
794 | *p = '\0'; | 840 | *p = '\0'; |
795 | 841 | ||
@@ -829,7 +875,7 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, | |||
829 | { | 875 | { |
830 | char ip4_addr[sizeof("255.255.255.255")]; | 876 | char ip4_addr[sizeof("255.255.255.255")]; |
831 | 877 | ||
832 | ip4_string(ip4_addr, addr, fmt[0] == 'i'); | 878 | ip4_string(ip4_addr, addr, fmt); |
833 | 879 | ||
834 | return string(buf, end, ip4_addr, spec); | 880 | return string(buf, end, ip4_addr, spec); |
835 | } | 881 | } |
@@ -896,12 +942,15 @@ static char *uuid_string(char *buf, char *end, const u8 *addr, | |||
896 | * - 'M' For a 6-byte MAC address, it prints the address in the | 942 | * - 'M' For a 6-byte MAC address, it prints the address in the |
897 | * usual colon-separated hex notation | 943 | * usual colon-separated hex notation |
898 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons | 944 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons |
945 | * - 'MF' For a 6-byte MAC FDDI address, it prints the address | ||
946 | * with a dash-separated hex notation | ||
899 | * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way | 947 | * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way |
900 | * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) | 948 | * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) |
901 | * IPv6 uses colon separated network-order 16 bit hex with leading 0's | 949 | * IPv6 uses colon separated network-order 16 bit hex with leading 0's |
902 | * - 'i' [46] for 'raw' IPv4/IPv6 addresses | 950 | * - 'i' [46] for 'raw' IPv4/IPv6 addresses |
903 | * IPv6 omits the colons (01020304...0f) | 951 | * IPv6 omits the colons (01020304...0f) |
904 | * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) | 952 | * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) |
953 | * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order | ||
905 | * - 'I6c' for IPv6 addresses printed as specified by | 954 | * - 'I6c' for IPv6 addresses printed as specified by |
906 | * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 | 955 | * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 |
907 | * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form | 956 | * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form |
@@ -939,6 +988,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
939 | return resource_string(buf, end, ptr, spec, fmt); | 988 | return resource_string(buf, end, ptr, spec, fmt); |
940 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ | 989 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ |
941 | case 'm': /* Contiguous: 000102030405 */ | 990 | case 'm': /* Contiguous: 000102030405 */ |
991 | /* [mM]F (FDDI, bit reversed) */ | ||
942 | return mac_address_string(buf, end, ptr, spec, fmt); | 992 | return mac_address_string(buf, end, ptr, spec, fmt); |
943 | case 'I': /* Formatted IP supported | 993 | case 'I': /* Formatted IP supported |
944 | * 4: 1.2.3.4 | 994 | * 4: 1.2.3.4 |
@@ -1297,7 +1347,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
1297 | break; | 1347 | break; |
1298 | 1348 | ||
1299 | case FORMAT_TYPE_NRCHARS: { | 1349 | case FORMAT_TYPE_NRCHARS: { |
1300 | int qualifier = spec.qualifier; | 1350 | u8 qualifier = spec.qualifier; |
1301 | 1351 | ||
1302 | if (qualifier == 'l') { | 1352 | if (qualifier == 'l') { |
1303 | long *ip = va_arg(args, long *); | 1353 | long *ip = va_arg(args, long *); |
@@ -1583,7 +1633,7 @@ do { \ | |||
1583 | 1633 | ||
1584 | case FORMAT_TYPE_NRCHARS: { | 1634 | case FORMAT_TYPE_NRCHARS: { |
1585 | /* skip %n 's argument */ | 1635 | /* skip %n 's argument */ |
1586 | int qualifier = spec.qualifier; | 1636 | u8 qualifier = spec.qualifier; |
1587 | void *skip_arg; | 1637 | void *skip_arg; |
1588 | if (qualifier == 'l') | 1638 | if (qualifier == 'l') |
1589 | skip_arg = va_arg(args, long *); | 1639 | skip_arg = va_arg(args, long *); |
@@ -1849,7 +1899,9 @@ int vsscanf(const char *buf, const char *fmt, va_list args) | |||
1849 | char *next; | 1899 | char *next; |
1850 | char digit; | 1900 | char digit; |
1851 | int num = 0; | 1901 | int num = 0; |
1852 | int qualifier, base, field_width; | 1902 | u8 qualifier; |
1903 | u8 base; | ||
1904 | s16 field_width; | ||
1853 | bool is_sign; | 1905 | bool is_sign; |
1854 | 1906 | ||
1855 | while (*fmt && *str) { | 1907 | while (*fmt && *str) { |
@@ -1927,7 +1979,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args) | |||
1927 | { | 1979 | { |
1928 | char *s = (char *)va_arg(args, char *); | 1980 | char *s = (char *)va_arg(args, char *); |
1929 | if (field_width == -1) | 1981 | if (field_width == -1) |
1930 | field_width = INT_MAX; | 1982 | field_width = SHORT_MAX; |
1931 | /* first, skip leading white space in buffer */ | 1983 | /* first, skip leading white space in buffer */ |
1932 | str = skip_spaces(str); | 1984 | str = skip_spaces(str); |
1933 | 1985 | ||
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index 215447c55261..2c13ecc5bb2c 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c | |||
@@ -8,21 +8,6 @@ | |||
8 | #include "inflate.h" | 8 | #include "inflate.h" |
9 | #include "inffast.h" | 9 | #include "inffast.h" |
10 | 10 | ||
11 | /* Only do the unaligned "Faster" variant when | ||
12 | * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set | ||
13 | * | ||
14 | * On powerpc, it won't be as we don't include autoconf.h | ||
15 | * automatically for the boot wrapper, which is intended as | ||
16 | * we run in an environment where we may not be able to deal | ||
17 | * with (even rare) alignment faults. In addition, we do not | ||
18 | * define __KERNEL__ for arch/powerpc/boot unlike x86 | ||
19 | */ | ||
20 | |||
21 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
22 | #include <asm/unaligned.h> | ||
23 | #include <asm/byteorder.h> | ||
24 | #endif | ||
25 | |||
26 | #ifndef ASMINF | 11 | #ifndef ASMINF |
27 | 12 | ||
28 | /* Allow machine dependent optimization for post-increment or pre-increment. | 13 | /* Allow machine dependent optimization for post-increment or pre-increment. |
@@ -36,14 +21,31 @@ | |||
36 | - Pentium III (Anderson) | 21 | - Pentium III (Anderson) |
37 | - M68060 (Nikl) | 22 | - M68060 (Nikl) |
38 | */ | 23 | */ |
24 | union uu { | ||
25 | unsigned short us; | ||
26 | unsigned char b[2]; | ||
27 | }; | ||
28 | |||
29 | /* Endian independed version */ | ||
30 | static inline unsigned short | ||
31 | get_unaligned16(const unsigned short *p) | ||
32 | { | ||
33 | union uu mm; | ||
34 | unsigned char *b = (unsigned char *)p; | ||
35 | |||
36 | mm.b[0] = b[0]; | ||
37 | mm.b[1] = b[1]; | ||
38 | return mm.us; | ||
39 | } | ||
40 | |||
39 | #ifdef POSTINC | 41 | #ifdef POSTINC |
40 | # define OFF 0 | 42 | # define OFF 0 |
41 | # define PUP(a) *(a)++ | 43 | # define PUP(a) *(a)++ |
42 | # define UP_UNALIGNED(a) get_unaligned((a)++) | 44 | # define UP_UNALIGNED(a) get_unaligned16((a)++) |
43 | #else | 45 | #else |
44 | # define OFF 1 | 46 | # define OFF 1 |
45 | # define PUP(a) *++(a) | 47 | # define PUP(a) *++(a) |
46 | # define UP_UNALIGNED(a) get_unaligned(++(a)) | 48 | # define UP_UNALIGNED(a) get_unaligned16(++(a)) |
47 | #endif | 49 | #endif |
48 | 50 | ||
49 | /* | 51 | /* |
@@ -256,7 +258,6 @@ void inflate_fast(z_streamp strm, unsigned start) | |||
256 | } | 258 | } |
257 | } | 259 | } |
258 | else { | 260 | else { |
259 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
260 | unsigned short *sout; | 261 | unsigned short *sout; |
261 | unsigned long loops; | 262 | unsigned long loops; |
262 | 263 | ||
@@ -274,22 +275,25 @@ void inflate_fast(z_streamp strm, unsigned start) | |||
274 | sfrom = (unsigned short *)(from - OFF); | 275 | sfrom = (unsigned short *)(from - OFF); |
275 | loops = len >> 1; | 276 | loops = len >> 1; |
276 | do | 277 | do |
278 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | ||
279 | PUP(sout) = PUP(sfrom); | ||
280 | #else | ||
277 | PUP(sout) = UP_UNALIGNED(sfrom); | 281 | PUP(sout) = UP_UNALIGNED(sfrom); |
282 | #endif | ||
278 | while (--loops); | 283 | while (--loops); |
279 | out = (unsigned char *)sout + OFF; | 284 | out = (unsigned char *)sout + OFF; |
280 | from = (unsigned char *)sfrom + OFF; | 285 | from = (unsigned char *)sfrom + OFF; |
281 | } else { /* dist == 1 or dist == 2 */ | 286 | } else { /* dist == 1 or dist == 2 */ |
282 | unsigned short pat16; | 287 | unsigned short pat16; |
283 | 288 | ||
284 | pat16 = *(sout-2+2*OFF); | 289 | pat16 = *(sout-1+OFF); |
285 | if (dist == 1) | 290 | if (dist == 1) { |
286 | #if defined(__BIG_ENDIAN) | 291 | union uu mm; |
287 | pat16 = (pat16 & 0xff) | ((pat16 & 0xff) << 8); | 292 | /* copy one char pattern to both bytes */ |
288 | #elif defined(__LITTLE_ENDIAN) | 293 | mm.us = pat16; |
289 | pat16 = (pat16 & 0xff00) | ((pat16 & 0xff00) >> 8); | 294 | mm.b[0] = mm.b[1]; |
290 | #else | 295 | pat16 = mm.us; |
291 | #error __BIG_ENDIAN nor __LITTLE_ENDIAN is defined | 296 | } |
292 | #endif | ||
293 | loops = len >> 1; | 297 | loops = len >> 1; |
294 | do | 298 | do |
295 | PUP(sout) = pat16; | 299 | PUP(sout) = pat16; |
@@ -298,20 +302,6 @@ void inflate_fast(z_streamp strm, unsigned start) | |||
298 | } | 302 | } |
299 | if (len & 1) | 303 | if (len & 1) |
300 | PUP(out) = PUP(from); | 304 | PUP(out) = PUP(from); |
301 | #else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ | ||
302 | from = out - dist; /* copy direct from output */ | ||
303 | do { /* minimum length is three */ | ||
304 | PUP(out) = PUP(from); | ||
305 | PUP(out) = PUP(from); | ||
306 | PUP(out) = PUP(from); | ||
307 | len -= 3; | ||
308 | } while (len > 2); | ||
309 | if (len) { | ||
310 | PUP(out) = PUP(from); | ||
311 | if (len > 1) | ||
312 | PUP(out) = PUP(from); | ||
313 | } | ||
314 | #endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ | ||
315 | } | 305 | } |
316 | } | 306 | } |
317 | else if ((op & 64) == 0) { /* 2nd level distance code */ | 307 | else if ((op & 64) == 0) { /* 2nd level distance code */ |