Diffstat (limited to 'lib')
 -rw-r--r--  lib/Kconfig.debug    |  32
 -rw-r--r--  lib/bitmap.c         |   3
 -rw-r--r--  lib/debug_locks.c    |   2
 -rw-r--r--  lib/div64.c          |  52
 -rw-r--r--  lib/idr.c            |  65
 -rw-r--r--  lib/list_sort.c      | 172
 -rw-r--r--  lib/parser.c         |   7
 -rw-r--r--  lib/percpu_counter.c |  55
 -rw-r--r--  lib/radix-tree.c     |  83
 -rw-r--r--  lib/vsprintf.c       |  19
 10 files changed, 360 insertions, 130 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 69a32664c289..28b42b9274d0 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -317,6 +317,14 @@ config DEBUG_OBJECTS_RCU_HEAD
 	help
 	  Enable this to turn on debugging of RCU list heads (call_rcu() usage).
 
+config DEBUG_OBJECTS_PERCPU_COUNTER
+	bool "Debug percpu counter objects"
+	depends on DEBUG_OBJECTS
+	help
+	  If you say Y here, additional code will be inserted into the
+	  percpu counter routines to track the lifetime of percpu counter
+	  objects and validate the percpu counter operations.
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
@@ -366,7 +374,7 @@ config SLUB_STATS
 config DEBUG_KMEMLEAK
 	bool "Kernel memory leak detector"
 	depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
-		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE)
+		(X86 || ARM || PPC || S390 || SPARC64 || SUPERH || MICROBLAZE || TILE)
 
 	select DEBUG_FS if SYSFS
 	select STACKTRACE if STACKTRACE_SUPPORT
@@ -740,6 +748,15 @@ config DEBUG_LIST
 
 	  If unsure, say N.
 
+config TEST_LIST_SORT
+	bool "Linked list sorting test"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on the 'list_sort()' function test. This test is
+	  executed only once during system boot, so it affects only boot time.
+
+	  If unsure, say N.
+
 config DEBUG_SG
 	bool "Debug SG table operations"
 	depends on DEBUG_KERNEL
@@ -1200,6 +1217,19 @@ config ATOMIC64_SELFTEST
 
 	  If unsure, say N.
 
+config ASYNC_RAID6_TEST
+	tristate "Self test for hardware accelerated raid6 recovery"
+	depends on ASYNC_RAID6_RECOV
+	select ASYNC_MEMCPY
+	---help---
+	  This is a one-shot self test that permutes through the
+	  recovery of all the possible two disk failure scenarios for an
+	  N-disk array. Recovery is performed with the asynchronous
+	  raid6 recovery routines, and will optionally use an offload
+	  engine if one is available.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
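All three new options are plain compile-time gates: other code tests the corresponding CONFIG_* symbol and falls back to no-op stubs when the option is off. A minimal sketch of that pattern, distilled from the lib/percpu_counter.c hunk further down (the real definitions appear there):

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
/* option on: lifetime-tracking hooks are compiled in */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}
#else
/* option off: the stub compiles away and callers stay #ifdef-free */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc) { }
#endif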
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ffb78c916ccd..741fae905ae3 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -359,7 +359,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
 
 #define CHUNKSZ				32
 #define nbits_to_hold_value(val)	fls(val)
-#define unhex(c)			(isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
 #define BASEDEC 10		/* fancier cpuset lists input in decimal */
 
 /**
@@ -466,7 +465,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
 		if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
 			return -EOVERFLOW;
 
-		chunk = (chunk << 4) | unhex(c);
+		chunk = (chunk << 4) | hex_to_bin(c);
 		ndigits++; totaldigits++;
 	}
 	if (ndigits == 0)
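hex_to_bin() is the kernel-wide replacement for the dropped ad-hoc unhex() macro. For reference, a userspace stand-in that mirrors the removed macro (the real kernel helper also flags non-hex input, which __bitmap_parse() screens separately):

#include <ctype.h>

/* Stand-in mirroring the removed unhex(); illustrative only. */
static int hex_to_bin(char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}

/* Fold one validated hex digit into the 32-bit chunk, as the parser does. */
static unsigned long fold_digit(unsigned long chunk, char c)
{
	return (chunk << 4) | hex_to_bin(c);
}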
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 5bf0020b9248..b1c177307677 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -8,7 +8,6 @@
  *
  * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
-#include <linux/kernel.h>
 #include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
@@ -39,7 +38,6 @@ int debug_locks_off(void)
 {
 	if (__debug_locks_off()) {
 		if (!debug_locks_silent) {
-			oops_in_progress = 1;
 			console_verbose();
 			return 1;
 		}
diff --git a/lib/div64.c b/lib/div64.c
index a111eb8de9cf..5b4919191778 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -77,26 +77,58 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 EXPORT_SYMBOL(div_s64_rem);
 #endif
 
-/* 64bit divisor, dividend and result. dynamic precision */
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'. The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ */
 #ifndef div64_u64
 u64 div64_u64(u64 dividend, u64 divisor)
 {
-	u32 high, d;
+	u32 high = divisor >> 32;
+	u64 quot;
 
-	high = divisor >> 32;
-	if (high) {
-		unsigned int shift = fls(high);
+	if (high == 0) {
+		quot = div_u64(dividend, divisor);
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
 
-		d = divisor >> shift;
-		dividend >>= shift;
-	} else
-		d = divisor;
+		if (quot != 0)
+			quot--;
+		if ((dividend - quot * divisor) >= divisor)
+			quot++;
+	}
 
-	return div_u64(dividend, d);
+	return quot;
 }
 EXPORT_SYMBOL(div64_u64);
 #endif
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend: 64bit dividend
+ * @divisor: 64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+	s64 quot, t;
+
+	quot = div64_u64(abs64(dividend), abs64(divisor));
+	t = (dividend ^ divisor) >> 63;
+
+	return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
 #endif /* BITS_PER_LONG == 32 */
 
 /*
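Why the correction step suffices: after shifting both operands right by n, the estimate div_u64(dividend >> n, divisor >> n) is at most one above the true quotient (per the Hacker's Delight proof linked above), so one unconditional decrement plus one conditional increment recovers the exact value. The sign handling in div64_s64 is the standard two's-complement fold; a standalone userspace sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* t is 0 when the operand signs match and -1 when they differ;
 * (q ^ t) - t conditionally negates q, exactly as div64_s64 does.
 * An arithmetic right shift of signed values is assumed here, as
 * the kernel assumes. */
static int64_t sign_fold(int64_t q, int64_t dividend, int64_t divisor)
{
	int64_t t = (dividend ^ divisor) >> 63;

	return (q ^ t) - t;
}

int main(void)
{
	printf("%lld\n", (long long)sign_fold(3,  7, -2));	/* -3 */
	printf("%lld\n", (long long)sign_fold(3, -7, -2));	/*  3 */
	return 0;
}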
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,16 +106,17 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 }
 
 /**
- * idr_pre_get - reserver resources for idr allocation
+ * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
- * This function should be called prior to locking and calling the
- * idr_get_new* functions. It preallocates enough memory to satisfy
- * the worst possible allocation.
+ * This function should be called prior to calling the idr_get_new* functions.
+ * It preallocates enough memory to satisfy the worst possible allocation. The
+ * caller should pass in GFP_KERNEL if possible. This of course requires that
+ * no spinning locks be held.
 *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
 */
 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
@@ -290,11 +291,13 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 * This is the allocate id function. It should be called with any
 * required locks.
 *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
 *
- * @id returns a value in the range @starting_id ... 0x7fffffff
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
+ *
+ * @id returns a value in the range @starting_id ... %0x7fffffff
 */
 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
@@ -318,14 +321,13 @@ EXPORT_SYMBOL(idr_get_new_above);
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
- * This is the allocate id function. It should be called with any
- * required locks.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
 *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
- * @id returns a value in the range 0 ... 0x7fffffff
+ * @id returns a value in the range %0 ... %0x7fffffff
 */
 int idr_get_new(struct idr *idp, void *ptr, int *id)
 {
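The reworded kernel-doc describes a two-step retry protocol; a typical caller looks roughly like this (allocate_id and my_lock are illustrative names, not from this patch):

int allocate_id(struct idr *idr, void *obj, spinlock_t *my_lock, int *id)
{
	int err;

again:
	if (!idr_pre_get(idr, GFP_KERNEL))
		return -ENOMEM;		/* really out of memory */

	spin_lock(my_lock);
	err = idr_get_new(idr, obj, id);
	spin_unlock(my_lock);

	if (err == -EAGAIN)
		goto again;		/* preallocation raced away; refill */
	return err;			/* 0, or -ENOSPC when the id space is full */
}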
@@ -388,7 +390,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 }
 
 /**
- * idr_remove - remove the given id and free it's slot
+ * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
@@ -437,7 +439,7 @@ EXPORT_SYMBOL(idr_remove);
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
- * A typical clean-up sequence for objects stored in an idr tree, will
+ * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
@@ -542,7 +544,7 @@ EXPORT_SYMBOL(idr_find);
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
- * than 0, we break out and return that value.
+ * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
@@ -637,8 +639,8 @@ EXPORT_SYMBOL(idr_get_next);
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
- * A -ENOENT return indicates that @id was not found.
- * A -EINVAL return indicates that @id was not within valid constraints.
+ * A %-ENOENT return indicates that @id was not found.
+ * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
@@ -696,10 +698,11 @@ void idr_init(struct idr *idp)
 EXPORT_SYMBOL(idr_init);
 
 
-/*
+/**
+ * DOC: IDA description
 * IDA - IDR based ID allocator
 *
- * this is id allocator without id -> pointer translation.  Memory
+ * This is id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
@@ -732,8 +735,8 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
- * If the system is REALLY out of memory this function returns 0,
- * otherwise 1.
+ * If the system is REALLY out of memory this function returns %0,
+ * otherwise %1.
 */
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
@@ -765,11 +768,11 @@ EXPORT_SYMBOL(ida_pre_get);
 * Allocate new ID above or equal to @ida.  It should be called with
 * any required locks.
 *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the ida_pre_get() call. If the ida is full, it will
- * return -ENOSPC.
+ * If memory is required, it will return %-EAGAIN, you should unlock
+ * and go back to the ida_pre_get() call. If the ida is full, it will
+ * return %-ENOSPC.
 *
- * @p_id returns a value in the range @starting_id ... 0x7fffffff.
+ * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 {
@@ -851,11 +854,11 @@ EXPORT_SYMBOL(ida_get_new_above);
 *
 * Allocate new ID.  It should be called with any required locks.
 *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If memory is required, it will return %-EAGAIN, you should unlock
+ * and go back to the idr_pre_get() call. If the idr is full, it will
+ * return %-ENOSPC.
 *
- * @id returns a value in the range 0 ... 0x7fffffff.
+ * @id returns a value in the range %0 ... %0x7fffffff.
 */
 int ida_get_new(struct ida *ida, int *p_id)
 {
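ida follows the same pre-get/get-new retry protocol but hands out bare integers instead of id-to-pointer mappings; a hedged sketch (get_id is illustrative, and my_ida is assumed to have been set up with ida_init()):

static struct ida my_ida;	/* ida_init(&my_ida) assumed at setup */

int get_id(int *id)
{
	int err;

	do {
		if (!ida_pre_get(&my_ida, GFP_KERNEL))
			return -ENOMEM;
		/* any caller-provided locking would bracket this call */
		err = ida_get_new(&my_ida, id);
	} while (err == -EAGAIN);

	return err;	/* 0, or -ENOSPC if the id space is exhausted */
}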
diff --git a/lib/list_sort.c b/lib/list_sort.c
index a7616fa3162e..d7325c6b103f 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -141,77 +141,151 @@ void list_sort(void *priv, struct list_head *head,
 }
 EXPORT_SYMBOL(list_sort);
 
-#ifdef DEBUG_LIST_SORT
+#ifdef CONFIG_TEST_LIST_SORT
+
+#include <linux/random.h>
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define TEST_LIST_LEN (512+128+2) /* not including head */
+
+#define TEST_POISON1 0xDEADBEEF
+#define TEST_POISON2 0xA324354C
+
 struct debug_el {
-	struct list_head l_h;
+	unsigned int poison1;
+	struct list_head list;
+	unsigned int poison2;
 	int value;
 	unsigned serial;
 };
 
-static int cmp(void *priv, struct list_head *a, struct list_head *b)
+/* Array, containing pointers to all elements in the test list */
+static struct debug_el **elts __initdata;
+
+static int __init check(struct debug_el *ela, struct debug_el *elb)
 {
-	return container_of(a, struct debug_el, l_h)->value
-	     - container_of(b, struct debug_el, l_h)->value;
+	if (ela->serial >= TEST_LIST_LEN) {
+		printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+				ela->serial);
+		return -EINVAL;
+	}
+	if (elb->serial >= TEST_LIST_LEN) {
+		printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+				elb->serial);
+		return -EINVAL;
+	}
+	if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
+		printk(KERN_ERR "list_sort_test: error: phantom element\n");
+		return -EINVAL;
+	}
+	if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
+		printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+				ela->poison1, ela->poison2);
+		return -EINVAL;
+	}
+	if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
+		printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+				elb->poison1, elb->poison2);
+		return -EINVAL;
+	}
+	return 0;
 }
 
-/*
- * The pattern of set bits in the list length determines which cases
- * are hit in list_sort().
- */
-#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */
+static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct debug_el *ela, *elb;
+
+	ela = container_of(a, struct debug_el, list);
+	elb = container_of(b, struct debug_el, list);
+
+	check(ela, elb);
+	return ela->value - elb->value;
+}
 
 static int __init list_sort_test(void)
 {
-	int i, r = 1, count;
-	struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
-	struct list_head *cur;
+	int i, count = 1, err = -EINVAL;
+	struct debug_el *el;
+	struct list_head *cur, *tmp;
+	LIST_HEAD(head);
+
+	printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");
 
-	printk(KERN_WARNING "testing list_sort()\n");
+	elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
+	if (!elts) {
+		printk(KERN_ERR "list_sort_test: error: cannot allocate "
+				"memory\n");
+		goto exit;
+	}
 
-	cur = head;
-	for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
-		struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
-		BUG_ON(!el);
+	for (i = 0; i < TEST_LIST_LEN; i++) {
+		el = kmalloc(sizeof(*el), GFP_KERNEL);
+		if (!el) {
+			printk(KERN_ERR "list_sort_test: error: cannot "
+					"allocate memory\n");
+			goto exit;
+		}
 		 /* force some equivalencies */
-		el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
+		el->value = random32() % (TEST_LIST_LEN/3);
 		el->serial = i;
-
-		el->l_h.prev = cur;
-		cur->next = &el->l_h;
-		cur = cur->next;
+		el->poison1 = TEST_POISON1;
+		el->poison2 = TEST_POISON2;
+		elts[i] = el;
+		list_add_tail(&el->list, &head);
 	}
-	head->prev = cur;
 
-	list_sort(NULL, head, cmp);
+	list_sort(NULL, &head, cmp);
+
+	for (cur = head.next; cur->next != &head; cur = cur->next) {
+		struct debug_el *el1;
+		int cmp_result;
 
-	count = 1;
-	for (cur = head->next; cur->next != head; cur = cur->next) {
-		struct debug_el *el = container_of(cur, struct debug_el, l_h);
-		int cmp_result = cmp(NULL, cur, cur->next);
 		if (cur->next->prev != cur) {
-			printk(KERN_EMERG "list_sort() returned "
-						"a corrupted list!\n");
-			return 1;
-		} else if (cmp_result > 0) {
-			printk(KERN_EMERG "list_sort() failed to sort!\n");
-			return 1;
-		} else if (cmp_result == 0 &&
-				el->serial >= container_of(cur->next,
-					struct debug_el, l_h)->serial) {
-			printk(KERN_EMERG "list_sort() failed to preserve order"
-						" of equivalent elements!\n");
-			return 1;
+			printk(KERN_ERR "list_sort_test: error: list is "
+					"corrupted\n");
+			goto exit;
+		}
+
+		cmp_result = cmp(NULL, cur, cur->next);
+		if (cmp_result > 0) {
+			printk(KERN_ERR "list_sort_test: error: list is not "
+					"sorted\n");
+			goto exit;
+		}
+
+		el = container_of(cur, struct debug_el, list);
+		el1 = container_of(cur->next, struct debug_el, list);
+		if (cmp_result == 0 && el->serial >= el1->serial) {
+			printk(KERN_ERR "list_sort_test: error: order of "
+					"equivalent elements not preserved\n");
+			goto exit;
+		}
+
+		if (check(el, el1)) {
+			printk(KERN_ERR "list_sort_test: error: element check "
+					"failed\n");
+			goto exit;
 		}
-		kfree(cur->prev);
 		count++;
 	}
-	kfree(cur);
-	if (count != LIST_SORT_TEST_LENGTH) {
-		printk(KERN_EMERG "list_sort() returned list of"
-						"different length!\n");
-		return 1;
+
+	if (count != TEST_LIST_LEN) {
+		printk(KERN_ERR "list_sort_test: error: bad list length %d",
+				count);
+		goto exit;
 	}
-	return 0;
+
+	err = 0;
+exit:
+	kfree(elts);
+	list_for_each_safe(cur, tmp, &head) {
+		list_del(cur);
+		kfree(container_of(cur, struct debug_el, list));
+	}
+	return err;
 }
 module_init(list_sort_test);
-#endif
+#endif /* CONFIG_TEST_LIST_SORT */
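Outside the self-test, list_sort() needs only a struct that embeds a list_head and a cmp callback with the signature used above; a minimal usage sketch (struct item and item_cmp are illustrative):

struct item {
	struct list_head node;
	int key;
};

static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = container_of(a, struct item, node);
	struct item *ib = container_of(b, struct item, node);

	return ia->key - ib->key;	/* <0, 0, >0, memcmp-style */
}

/* Given a populated LIST_HEAD(items):
 *	list_sort(NULL, &items, item_cmp);
 * performs a stable merge sort, preserving the order of equal keys --
 * the property the test above verifies via the serial numbers. */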
diff --git a/lib/parser.c b/lib/parser.c
index fb34977246bb..6e89eca5cca0 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -128,12 +128,13 @@ static int match_number(substring_t *s, int *result, int base)
 	char *endp;
 	char *buf;
 	int ret;
+	size_t len = s->to - s->from;
 
-	buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+	buf = kmalloc(len + 1, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
-	memcpy(buf, s->from, s->to - s->from);
-	buf[s->to - s->from] = '\0';
+	memcpy(buf, s->from, len);
+	buf[len] = '\0';
 	*result = simple_strtol(buf, &endp, base);
 	ret = 0;
 	if (endp == buf)
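The new len variable simply names the substring length that was previously computed three times. The underlying pattern (bounded copy plus NUL terminator before strtol) in standalone form, with the substring bounds passed directly instead of via substring_t:

#include <stdlib.h>
#include <string.h>

/* Parse the integer occupying [from, to) -- the shape match_number() now has. */
static int parse_span(const char *from, const char *to, int base, long *result)
{
	size_t len = to - from;
	char *endp;
	char *buf = malloc(len + 1);

	if (!buf)
		return -1;		/* -ENOMEM in the kernel version */
	memcpy(buf, from, len);
	buf[len] = '\0';
	*result = strtol(buf, &endp, base);
	free(buf);
	return endp == buf ? -1 : 0;	/* no digits consumed -> error */
}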
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ec9048e74f44..604678d7d06d 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -8,10 +8,53 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/debugobjects.h>
 
 static LIST_HEAD(percpu_counters);
 static DEFINE_MUTEX(percpu_counters_lock);
 
+#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
+
+static struct debug_obj_descr percpu_counter_debug_descr;
+
+static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
+{
+	struct percpu_counter *fbc = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		percpu_counter_destroy(fbc);
+		debug_object_free(fbc, &percpu_counter_debug_descr);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static struct debug_obj_descr percpu_counter_debug_descr = {
+	.name		= "percpu_counter",
+	.fixup_free	= percpu_counter_fixup_free,
+};
+
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{
+	debug_object_init(fbc, &percpu_counter_debug_descr);
+	debug_object_activate(fbc, &percpu_counter_debug_descr);
+}
+
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{
+	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
+	debug_object_free(fbc, &percpu_counter_debug_descr);
+}
+
+#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{ }
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{ }
+#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
@@ -30,9 +73,9 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
 	s32 *pcount;
-	int cpu = get_cpu();
 
-	pcount = per_cpu_ptr(fbc->counters, cpu);
+	preempt_disable();
+	pcount = this_cpu_ptr(fbc->counters);
 	count = *pcount + amount;
 	if (count >= batch || count <= -batch) {
 		spin_lock(&fbc->lock);
@@ -42,7 +85,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	} else {
 		*pcount = count;
 	}
-	put_cpu();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__percpu_counter_add);
 
@@ -75,7 +118,11 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 	fbc->counters = alloc_percpu(s32);
 	if (!fbc->counters)
 		return -ENOMEM;
+
+	debug_percpu_counter_activate(fbc);
+
 #ifdef CONFIG_HOTPLUG_CPU
+	INIT_LIST_HEAD(&fbc->list);
 	mutex_lock(&percpu_counters_lock);
 	list_add(&fbc->list, &percpu_counters);
 	mutex_unlock(&percpu_counters_lock);
@@ -89,6 +136,8 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	if (!fbc->counters)
 		return;
 
+	debug_percpu_counter_deactivate(fbc);
+
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
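Taken together, the hunks bracket a counter's lifetime for debugobjects: init activates the object, destroy deactivates it, and freeing a still-active counter is what CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER catches. A usage sketch (example() is illustrative):

static struct percpu_counter nr_things;

static int example(void)
{
	int err = percpu_counter_init(&nr_things, 0);

	if (err)
		return err;

	/* fast path now uses preempt_disable() + this_cpu_ptr() */
	percpu_counter_inc(&nr_things);

	pr_info("total: %lld\n", (long long)percpu_counter_sum(&nr_things));

	percpu_counter_destroy(&nr_things);	/* deactivates the debug object */
	return 0;
}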
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f412ab4c24f..5086bb962b4d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -82,6 +82,16 @@ struct radix_tree_preload {
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
+static inline void *ptr_to_indirect(void *ptr)
+{
+	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
+}
+
+static inline void *indirect_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
+}
+
 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 {
 	return root->gfp_mask & __GFP_BITS_MASK;
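The two helpers centralize the pointer-tagging trick: since radix tree nodes are at least word-aligned, the low bit of a node pointer is free to flag it as an internal ("indirect") node. The round-trip in isolation, as a userspace sketch:

#include <assert.h>

#define INDIRECT_BIT 1UL	/* stand-in for RADIX_TREE_INDIRECT_PTR */

int main(void)
{
	long node;		/* any word-aligned object */
	void *tagged = (void *)((unsigned long)&node | INDIRECT_BIT);

	/* the is-indirect test, and recovery of the untagged pointer */
	assert(((unsigned long)tagged & INDIRECT_BIT) != 0);
	assert((void *)((unsigned long)tagged & ~INDIRECT_BIT) == &node);
	return 0;
}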
@@ -265,7 +275,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 			return -ENOMEM;
 
 		/* Increase the height. */
-		node->slots[0] = radix_tree_indirect_to_ptr(root->rnode);
+		node->slots[0] = indirect_to_ptr(root->rnode);
 
 		/* Propagate the aggregated tag info into the new root */
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -276,7 +286,7 @@
 		newheight = root->height+1;
 		node->height = newheight;
 		node->count = 1;
-		node = radix_tree_ptr_to_indirect(node);
+		node = ptr_to_indirect(node);
 		rcu_assign_pointer(root->rnode, node);
 		root->height = newheight;
 	} while (height > root->height);
@@ -309,7 +319,7 @@ int radix_tree_insert(struct radix_tree_root *root,
 			return error;
 	}
 
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	height = root->height;
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
@@ -325,8 +335,7 @@ int radix_tree_insert(struct radix_tree_root *root,
 				rcu_assign_pointer(node->slots[offset], slot);
 				node->count++;
 			} else
-				rcu_assign_pointer(root->rnode,
-					radix_tree_ptr_to_indirect(slot));
+				rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
 		}
 
 		/* Go a level down */
@@ -374,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
 			return NULL;
 		return is_slot ? (void *)&root->rnode : node;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	height = node->height;
 	if (index > radix_tree_maxindex(height))
@@ -393,7 +402,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
 		height--;
 	} while (height > 0);
 
-	return is_slot ? (void *)slot:node;
+	return is_slot ? (void *)slot : indirect_to_ptr(node);
 }
 
 /**
@@ -455,7 +464,7 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
 	height = root->height;
 	BUG_ON(index > radix_tree_maxindex(height));
 
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 
 	while (height > 0) {
@@ -509,7 +518,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	while (height > 0) {
 		int offset;
@@ -579,7 +588,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
 
 	if (!radix_tree_is_indirect_ptr(node))
 		return (index == 0);
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	height = node->height;
 	if (index > radix_tree_maxindex(height))
@@ -666,7 +675,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 	}
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	/*
 	 * we fill the path from (root->height - 2) to 0, leaving the index at
@@ -897,7 +906,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 		results[0] = node;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -916,7 +925,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference_raw(slot);
+			results[ret + nr_found] =
+				indirect_to_ptr(rcu_dereference_raw(slot));
 			nr_found++;
 		}
 		ret += nr_found;
@@ -965,7 +975,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
 		results[0] = (void **)&root->rnode;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1090,7 +1100,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 		results[0] = node;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1109,7 +1119,8 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference_raw(slot);
+			results[ret + nr_found] =
+				indirect_to_ptr(rcu_dereference_raw(slot));
 			nr_found++;
 		}
 		ret += nr_found;
@@ -1159,7 +1170,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
 		results[0] = (void **)&root->rnode;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1195,7 +1206,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 		void *newptr;
 
 		BUG_ON(!radix_tree_is_indirect_ptr(to_free));
-		to_free = radix_tree_indirect_to_ptr(to_free);
+		to_free = indirect_to_ptr(to_free);
 
 		/*
 		 * The candidate node has more than one child, or its child
@@ -1208,16 +1219,39 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 
 		/*
 		 * We don't need rcu_assign_pointer(), since we are simply
-		 * moving the node from one part of the tree to another. If
-		 * it was safe to dereference the old pointer to it
+		 * moving the node from one part of the tree to another: if it
+		 * was safe to dereference the old pointer to it
 		 * (to_free->slots[0]), it will be safe to dereference the new
-		 * one (root->rnode).
+		 * one (root->rnode) as far as dependent read barriers go.
 		 */
 		newptr = to_free->slots[0];
 		if (root->height > 1)
-			newptr = radix_tree_ptr_to_indirect(newptr);
+			newptr = ptr_to_indirect(newptr);
 		root->rnode = newptr;
 		root->height--;
+
+		/*
+		 * We have a dilemma here. The node's slot[0] must not be
+		 * NULLed in case there are concurrent lookups expecting to
+		 * find the item. However if this was a bottom-level node,
+		 * then it may be subject to the slot pointer being visible
+		 * to callers dereferencing it. If item corresponding to
+		 * slot[0] is subsequently deleted, these callers would expect
+		 * their slot to become empty sooner or later.
+		 *
+		 * For example, lockless pagecache will look up a slot, deref
+		 * the page pointer, and if the page is 0 refcount it means it
+		 * was concurrently deleted from pagecache so try the deref
+		 * again. Fortunately there is already a requirement for logic
+		 * to retry the entire slot lookup -- the indirect pointer
+		 * problem (replacing direct root node with an indirect pointer
+		 * also results in a stale slot). So tag the slot as indirect
+		 * to force callers to retry.
+		 */
+		if (root->height == 0)
+			*((unsigned long *)&to_free->slots[0]) |=
+						RADIX_TREE_INDIRECT_PTR;
+
 		radix_tree_node_free(to_free);
 	}
 }
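The long comment obliges lookup-side code to treat an indirect-tagged value found in a slot as "stale, retry". A sketch of that retry discipline (lookup_retry is an illustrative name; the real consumers are callers such as the lockless pagecache):

static void *lookup_retry(struct radix_tree_root *root, unsigned long index)
{
	void **slot;
	void *item;

repeat:
	rcu_read_lock();
	slot = radix_tree_lookup_slot(root, index);
	item = slot ? radix_tree_deref_slot(slot) : NULL;
	if (item && radix_tree_is_indirect_ptr(item)) {
		/* slot went stale via radix_tree_shrink(); redo the walk */
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	return item;
}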
@@ -1254,7 +1288,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 		root->rnode = NULL;
 		goto out;
 	}
-	slot = radix_tree_indirect_to_ptr(slot);
+	slot = indirect_to_ptr(slot);
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
@@ -1296,8 +1330,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
 		radix_tree_node_free(to_free);
 
 	if (pathp->node->count) {
-		if (pathp->node ==
-				radix_tree_indirect_to_ptr(root->rnode))
+		if (pathp->node == indirect_to_ptr(root->rnode))
 			radix_tree_shrink(root);
 		goto out;
 	}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7af9d841c43b..c150d3dafff4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -988,8 +988,15 @@ static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	      struct printf_spec spec)
 {
-	if (!ptr)
+	if (!ptr) {
+		/*
+		 * Print (null) with the same width as a pointer so it makes
+		 * tabular output look nice.
+		 */
+		if (spec.field_width == -1)
+			spec.field_width = 2 * sizeof(void *);
 		return string(buf, end, "(null)", spec);
+	}
 
 	switch (*fmt) {
 	case 'F':
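With the fix, a NULL handed to %p consumes the same field width as a real pointer (16 columns on a 64-bit build, 8 on 32-bit), so mixed tabular output stays aligned. Illustrative effect, with a made-up address:

/* pr_info("%p %s\n", ptr, name); */
/* before: "(null) foo"           vs "ffff880012345678 bar"  -- columns drift */
/* after:  "          (null) foo" vs "ffff880012345678 bar"  -- aligned       */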
@@ -1031,7 +1038,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
-		spec.field_width = 2*sizeof(void *);
+		spec.field_width = 2 * sizeof(void *);
 		spec.flags |= ZEROPAD;
 	}
 	spec.base = 16;
@@ -1497,7 +1504,7 @@ EXPORT_SYMBOL(snprintf);
 * @...: Arguments for the format string
 *
 * The return value is the number of characters written into @buf not including
- * the trailing '\0'. If @size is <= 0 the function returns 0.
+ * the trailing '\0'. If @size is == 0 the function returns 0.
 */
 
 int scnprintf(char *buf, size_t size, const char *fmt, ...)
@@ -1509,7 +1516,11 @@ int scnprintf(char *buf, size_t size, const char *fmt, ...)
 	i = vsnprintf(buf, size, fmt, args);
 	va_end(args);
 
-	return (i >= size) ? (size - 1) : i;
+	if (likely(i < size))
+		return i;
+	if (size != 0)
+		return size - 1;
+	return 0;
 }
 EXPORT_SYMBOL(scnprintf);
 
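The restructured return also repairs the @size == 0 case: the old expression evaluated size - 1 on an unsigned zero and handed back -1, where 0 is now returned (matching the corrected kernel-doc above). The snprintf()/scnprintf() contrast in one sketch:

static void demo(void)
{
	char buf[8];
	int want, got, none;

	want = snprintf(buf, sizeof(buf), "%s", "hello, world");  /* 12: length needed */
	got  = scnprintf(buf, sizeof(buf), "%s", "hello, world"); /*  7: actually stored */
	none = scnprintf(buf, 0, "%s", "hello");                  /*  0 now, was -1 */
}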