path: root/lib
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug     |  77
-rw-r--r--  lib/bitmap.c          |   3
-rw-r--r--  lib/div64.c           |  52
-rw-r--r--  lib/dma-debug.c       |   1
-rw-r--r--  lib/dynamic_debug.c   | 140
-rw-r--r--  lib/idr.c             |  37
-rw-r--r--  lib/kobject.c         |  39
-rw-r--r--  lib/list_sort.c       | 172
-rw-r--r--  lib/parser.c          |   7
-rw-r--r--  lib/percpu_counter.c  |  55
-rw-r--r--  lib/radix-tree.c      |   2
-rw-r--r--  lib/swiotlb.c         |  18
-rw-r--r--  lib/vsprintf.c        |  19
13 files changed, 455 insertions(+), 167 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9f211b2642f5..995840664a5f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -317,6 +317,14 @@ config DEBUG_OBJECTS_RCU_HEAD
 	help
 	  Enable this to turn on debugging of RCU list heads (call_rcu() usage).
 
+config DEBUG_OBJECTS_PERCPU_COUNTER
+	bool "Debug percpu counter objects"
+	depends on DEBUG_OBJECTS
+	help
+	  If you say Y here, additional code will be inserted into the
+	  percpu counter routines to track the life time of percpu counter
+	  objects and validate the percpu counter operations.
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1
@@ -353,7 +361,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB && SLUB_DEBUG && SYSFS
+	depends on SLUB && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be
@@ -461,6 +469,15 @@ config DEBUG_MUTEXES
 	 This feature allows mutex semantics violations to be detected and
 	 reported.
 
+config BKL
+	bool "Big Kernel Lock" if (SMP || PREEMPT)
+	default y
+	help
+	  This is the traditional lock that is used in old code instead
+	  of proper locking. All drivers that use the BKL should depend
+	  on this symbol.
+	  Say Y here unless you are working on removing the BKL.
+
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -482,6 +499,7 @@ config PROVE_LOCKING
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
 	select DEBUG_LOCK_ALLOC
+	select TRACE_IRQFLAGS
 	default n
 	help
 	  This feature enables the kernel to prove that all locking
@@ -539,6 +557,23 @@ config PROVE_RCU_REPEATEDLY
 	  disabling, allowing multiple RCU-lockdep warnings to be printed
 	  on a single reboot.
 
+	  Say Y to allow multiple RCU-lockdep warnings per boot.
+
+	  Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+	bool "RCU debugging: sparse-based checks for pointer usage"
+	default n
+	help
+	  This feature enables the __rcu sparse annotation for
+	  RCU-protected pointers. This annotation will cause sparse
+	  to flag any non-RCU used of annotated pointers. This can be
+	  helpful when debugging RCU usage. Please note that this feature
+	  is not intended to enforce code cleanliness; it is instead merely
+	  a debugging aid.
+
+	  Say Y to make sparse flag questionable use of RCU-protected pointers
+
 	  Say N if you are unsure.
 
 config LOCKDEP
@@ -579,11 +614,10 @@ config DEBUG_LOCKDEP
 	  of more runtime overhead.
 
 config TRACE_IRQFLAGS
-	depends on DEBUG_KERNEL
 	bool
-	default y
-	depends on TRACE_IRQFLAGS_SUPPORT
-	depends on PROVE_LOCKING
+	help
+	  Enables hooks to interrupt enabling and disabling for
+	  either tracing or lock debugging.
 
 config DEBUG_SPINLOCK_SLEEP
 	bool "Spinlock debugging: sleep-inside-spinlock checking"
@@ -714,6 +748,15 @@ config DEBUG_LIST
 
 	  If unsure, say N.
 
+config TEST_LIST_SORT
+	bool "Linked list sorting test"
+	depends on DEBUG_KERNEL
+	help
+	  Enable this to turn on 'list_sort()' function test. This test is
+	  executed only once during system boot, so affects only boot time.
+
+	  If unsure, say N.
+
 config DEBUG_SG
 	bool "Debug SG table operations"
 	depends on DEBUG_KERNEL
@@ -832,6 +875,30 @@ config RCU_CPU_STALL_DETECTOR
 
 	  Say Y if you are unsure.
 
+config RCU_CPU_STALL_TIMEOUT
+	int "RCU CPU stall timeout in seconds"
+	depends on RCU_CPU_STALL_DETECTOR
+	range 3 300
+	default 60
+	help
+	  If a given RCU grace period extends more than the specified
+	  number of seconds, a CPU stall warning is printed. If the
+	  RCU grace period persists, additional CPU stall warnings are
+	  printed at more widely spaced intervals.
+
+config RCU_CPU_STALL_DETECTOR_RUNNABLE
+	bool "RCU CPU stall checking starts automatically at boot"
+	depends on RCU_CPU_STALL_DETECTOR
+	default y
+	help
+	  If set, start checking for RCU CPU stalls immediately on
+	  boot. Otherwise, RCU CPU stall checking must be manually
+	  enabled.
+
+	  Say Y if you are unsure.
+
+	  Say N if you wish to suppress RCU CPU stall checking during boot.
+
 config RCU_CPU_STALL_VERBOSE
 	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
 	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
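
Note on SPARSE_RCU_POINTER: the __rcu annotation it enables only has teeth when pointers are accessed through the RCU primitives. A minimal kernel-style sketch of what sparse would and would not flag (the types and field names here are hypothetical, not from this diff):

#include <linux/rcupdate.h>

/* Hypothetical example of the __rcu annotation that SPARSE_RCU_POINTER
 * checks; 'struct conf' and 'struct holder' are illustrative only. */
struct conf {
	int threshold;
};

struct holder {
	struct conf __rcu *conf;	/* RCU-protected pointer */
};

static int read_threshold(struct holder *h)
{
	struct conf *c;
	int val = -1;

	rcu_read_lock();
	c = rcu_dereference(h->conf);	/* OK: sparse-clean access */
	if (c)
		val = c->threshold;
	rcu_read_unlock();
	return val;	/* a plain 'h->conf->threshold' would be flagged */
}
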
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ffb78c916ccd..741fae905ae3 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -359,7 +359,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
 
 #define CHUNKSZ				32
 #define nbits_to_hold_value(val)	fls(val)
-#define unhex(c)			(isdigit(c) ? (c - '0') : (toupper(c) - 'A' + 10))
 #define BASEDEC 10		/* fancier cpuset lists input in decimal */
 
 /**
@@ -466,7 +465,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
 			if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
 				return -EOVERFLOW;
 
-			chunk = (chunk << 4) | unhex(c);
+			chunk = (chunk << 4) | hex_to_bin(c);
 			ndigits++; totaldigits++;
 		}
 		if (ndigits == 0)
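
The open-coded unhex() macro removed above is replaced by the shared hex_to_bin() helper. A sketch of its semantics (the real helper lives in lib/hexdump.c and may differ in detail):

#include <linux/ctype.h>

/* Sketch of hex_to_bin() semantics: 0..15 for a valid hex digit,
 * -1 for anything else -- an error case unhex() never handled. */
static int hex_to_bin_sketch(char ch)
{
	if (isdigit(ch))
		return ch - '0';
	ch = tolower(ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}
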
diff --git a/lib/div64.c b/lib/div64.c
index a111eb8de9cf..5b4919191778 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -77,26 +77,58 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 EXPORT_SYMBOL(div_s64_rem);
 #endif
 
-/* 64bit divisor, dividend and result. dynamic precision */
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'.  The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ */
 #ifndef div64_u64
 u64 div64_u64(u64 dividend, u64 divisor)
 {
-	u32 high, d;
+	u32 high = divisor >> 32;
+	u64 quot;
 
-	high = divisor >> 32;
-	if (high) {
-		unsigned int shift = fls(high);
+	if (high == 0) {
+		quot = div_u64(dividend, divisor);
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
 
-		d = divisor >> shift;
-		dividend >>= shift;
-	} else
-		d = divisor;
+		if (quot != 0)
+			quot--;
+		if ((dividend - quot * divisor) >= divisor)
+			quot++;
+	}
 
-	return div_u64(dividend, d);
+	return quot;
 }
 EXPORT_SYMBOL(div64_u64);
 #endif
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+	s64 quot, t;
+
+	quot = div64_u64(abs64(dividend), abs64(divisor));
+	t = (dividend ^ divisor) >> 63;
+
+	return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
 #endif /* BITS_PER_LONG == 32 */
 
 /*
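
The new div64_u64() builds on a 64/32 division primitive (div_u64()) plus a one-step correction of the estimate. A standalone userspace sketch of the same shift-and-correct idea, with plain C division standing in for div_u64():

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of the Hacker's Delight approach used above: shift
 * both operands right until the divisor fits in 32 bits, divide, then
 * correct the (at most off-by-one in either direction) estimate. */
static int fls32(uint32_t x)		/* find last set bit, 1-based */
{
	int n = 0;
	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

static uint64_t div64_u64_sketch(uint64_t dividend, uint64_t divisor)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		quot = dividend / divisor;	/* stands in for div_u64() */
	} else {
		int n = 1 + fls32(high);
		quot = (dividend >> n) / (divisor >> n);
		if (quot != 0)
			quot--;			/* estimate may be one too big */
		if (dividend - quot * divisor >= divisor)
			quot++;			/* ...or one too small */
	}
	return quot;
}

int main(void)
{
	/* (2^32+1)(2^32-1) = 2^64-1, so the answer is 0xffffffff */
	printf("%llu\n", (unsigned long long)
	       div64_u64_sketch(0xFFFFFFFFFFFFFFFFull, 0x100000001ull));
	return 0;
}
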
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 01e64270e246..4bfb0471f106 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -590,6 +590,7 @@ out_unlock:
 static const struct file_operations filter_fops = {
 	.read  = filter_read,
 	.write = filter_write,
+	.llseek = default_llseek,
 };
 
 static int dma_debug_fs_init(void)
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 02afc2533728..3094318bfea7 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -26,19 +26,11 @@
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/jump_label.h>
 
 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];
 
-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
 struct ddebug_table {
 	struct list_head link;
 	char *mod_name;
@@ -88,26 +80,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
 }
 
 /*
- * must be called with ddebug_lock held
- */
-
-static int disabled_hash(char hash, bool first_table)
-{
-	struct ddebug_table *dt;
-	char table_hash_value;
-
-	list_for_each_entry(dt, &ddebug_tables, link) {
-		if (first_table)
-			table_hash_value = dt->ddebugs->primary_hash;
-		else
-			table_hash_value = dt->ddebugs->secondary_hash;
-		if (dt->num_enabled && (hash == table_hash_value))
-			return 0;
-	}
-	return 1;
-}
-
-/*
  * Search the tables for _ddebug's which match the given
  * `query' and apply the `flags' and `mask' to them. Tells
  * the user which ddebug's were changed, or whether none
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
 				dt->num_enabled++;
 			dp->flags = newflags;
 			if (newflags) {
-				dynamic_debug_enabled |=
-						(1LL << dp->primary_hash);
-				dynamic_debug_enabled2 |=
-						(1LL << dp->secondary_hash);
+				jump_label_enable(&dp->enabled);
 			} else {
-				if (disabled_hash(dp->primary_hash, true))
-					dynamic_debug_enabled &=
-						~(1LL << dp->primary_hash);
-				if (disabled_hash(dp->secondary_hash, false))
-					dynamic_debug_enabled2 &=
-						~(1LL << dp->secondary_hash);
+				jump_label_disable(&dp->enabled);
 			}
 			if (verbose)
 				printk(KERN_INFO
@@ -429,6 +393,40 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 	return 0;
 }
 
+static int ddebug_exec_query(char *query_string)
+{
+	unsigned int flags = 0, mask = 0;
+	struct ddebug_query query;
+#define MAXWORDS 9
+	int nwords;
+	char *words[MAXWORDS];
+
+	nwords = ddebug_tokenize(query_string, words, MAXWORDS);
+	if (nwords <= 0)
+		return -EINVAL;
+	if (ddebug_parse_query(words, nwords-1, &query))
+		return -EINVAL;
+	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
+		return -EINVAL;
+
+	/* actually go and implement the change */
+	ddebug_change(&query, flags, mask);
+	return 0;
+}
+
+static __initdata char ddebug_setup_string[1024];
+static __init int ddebug_setup_query(char *str)
+{
+	if (strlen(str) >= 1024) {
+		pr_warning("ddebug boot param string too large\n");
+		return 0;
+	}
+	strcpy(ddebug_setup_string, str);
+	return 1;
+}
+
+__setup("ddebug_query=", ddebug_setup_query);
+
 /*
  * File_ops->write method for <debugfs>/dynamic_debug/conrol. Gathers the
  * command text from userspace, parses and executes it.
@@ -436,12 +434,8 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 				  size_t len, loff_t *offp)
 {
-	unsigned int flags = 0, mask = 0;
-	struct ddebug_query query;
-#define MAXWORDS 9
-	int nwords;
-	char *words[MAXWORDS];
 	char tmpbuf[256];
+	int ret;
 
 	if (len == 0)
 		return 0;
@@ -455,16 +449,9 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 		printk(KERN_INFO "%s: read %d bytes from userspace\n",
 			__func__, (int)len);
 
-	nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
-	if (nwords <= 0)
-		return -EINVAL;
-	if (ddebug_parse_query(words, nwords-1, &query))
-		return -EINVAL;
-	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
-		return -EINVAL;
-
-	/* actually go and implement the change */
-	ddebug_change(&query, flags, mask);
+	ret = ddebug_exec_query(tmpbuf);
+	if (ret)
+		return ret;
 
 	*offp += len;
 	return len;
@@ -725,13 +712,14 @@ static void ddebug_remove_all_tables(void)
 	mutex_unlock(&ddebug_lock);
 }
 
-static int __init dynamic_debug_init(void)
+static __initdata int ddebug_init_success;
+
+static int __init dynamic_debug_init_debugfs(void)
 {
 	struct dentry *dir, *file;
-	struct _ddebug *iter, *iter_start;
-	const char *modname = NULL;
-	int ret = 0;
-	int n = 0;
+
+	if (!ddebug_init_success)
+		return -ENODEV;
 
 	dir = debugfs_create_dir("dynamic_debug", NULL);
 	if (!dir)
@@ -742,6 +730,16 @@ static int __init dynamic_debug_init(void)
 		debugfs_remove(dir);
 		return -ENOMEM;
 	}
+	return 0;
+}
+
+static int __init dynamic_debug_init(void)
+{
+	struct _ddebug *iter, *iter_start;
+	const char *modname = NULL;
+	int ret = 0;
+	int n = 0;
+
 	if (__start___verbose != __stop___verbose) {
 		iter = __start___verbose;
 		modname = iter->modname;
@@ -759,12 +757,26 @@ static int __init dynamic_debug_init(void)
 		}
 		ret = ddebug_add_module(iter_start, n, modname);
 	}
+
+	/* ddebug_query boot param got passed -> set it up */
+	if (ddebug_setup_string[0] != '\0') {
+		ret = ddebug_exec_query(ddebug_setup_string);
+		if (ret)
+			pr_warning("Invalid ddebug boot param %s",
+				   ddebug_setup_string);
+		else
+			pr_info("ddebug initialized with string %s",
+				ddebug_setup_string);
+	}
+
 out_free:
-	if (ret) {
+	if (ret)
 		ddebug_remove_all_tables();
-		debugfs_remove(dir);
-		debugfs_remove(file);
-	}
+	else
+		ddebug_init_success = 1;
 	return 0;
 }
-module_init(dynamic_debug_init);
+/* Allow early initialization for boot messages via boot param */
+arch_initcall(dynamic_debug_init);
+/* Debugfs setup must be done later */
+module_init(dynamic_debug_init_debugfs);
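
The new ddebug_query= boot parameter accepts the same query/flags syntax as writes to <debugfs>/dynamic_debug/control, for example (query text as in the dynamic-debug documentation of the era; the file and line are illustrative):

	ddebug_query="file svcsock.c line 1603 +p"

Because debugfs is not available that early, dynamic_debug_init() is promoted to arch_initcall() so the query can take effect during boot, while the debugfs control file is still registered at module_init() time.
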
diff --git a/lib/idr.c b/lib/idr.c
index 7f1a4f0acf50..e35850d3004a 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -110,9 +110,10 @@ static void idr_mark_full(struct idr_layer **pa, int id)
  * @idp:	idr handle
  * @gfp_mask:	memory allocation flags
  *
- * This function should be called prior to locking and calling the
- * idr_get_new* functions. It preallocates enough memory to satisfy
- * the worst possible allocation.
+ * This function should be called prior to calling the idr_get_new* functions.
+ * It preallocates enough memory to satisfy the worst possible allocation. The
+ * caller should pass in GFP_KERNEL if possible. This of course requires that
+ * no spinning locks be held.
  *
  * If the system is REALLY out of memory this function returns 0,
  * otherwise 1.
@@ -284,15 +285,17 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
  * idr_get_new_above - allocate new idr entry above or equal to a start id
  * @idp: idr handle
  * @ptr: pointer you want associated with the id
- * @start_id: id to start search at
+ * @starting_id: id to start search at
  * @id: pointer to the allocated handle
  *
  * This is the allocate id function.  It should be called with any
  * required locks.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return -EAGAIN.  The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
+ *
+ * If the idr is full idr_get_new_above() will return -ENOSPC.
  *
  * @id returns a value in the range @starting_id ... 0x7fffffff
  */
@@ -318,12 +321,11 @@ EXPORT_SYMBOL(idr_get_new_above);
  * @ptr: pointer you want associated with the id
  * @id: pointer to the allocated handle
  *
- * This is the allocate id function.  It should be called with any
- * required locks.
+ * If allocation from IDR's private freelist fails, idr_get_new_above() will
+ * return -EAGAIN.  The caller should retry the idr_pre_get() call to refill
+ * IDR's preallocation and then retry the idr_get_new_above() call.
  *
- * If memory is required, it will return -EAGAIN, you should unlock
- * and go back to the idr_pre_get() call. If the idr is full, it will
- * return -ENOSPC.
+ * If the idr is full idr_get_new_above() will return -ENOSPC.
  *
  * @id returns a value in the range 0 ... 0x7fffffff
  */
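
A sketch of the retry idiom these comments describe (the caller, my_idr and my_lock, is hypothetical):

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Illustrative caller for the idr_pre_get()/idr_get_new_above() protocol
 * documented above; the idr, lock and object pointer are hypothetical. */
static int assign_id(struct idr *my_idr, spinlock_t *my_lock,
		     void *obj, int *id)
{
	int ret;
again:
	if (!idr_pre_get(my_idr, GFP_KERNEL))	/* refill preallocation */
		return -ENOMEM;
	spin_lock(my_lock);
	ret = idr_get_new_above(my_idr, obj, 1, id);
	spin_unlock(my_lock);
	if (ret == -EAGAIN)	/* freelist raced empty: refill and retry */
		goto again;
	return ret;		/* 0 on success, or -ENOSPC */
}
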
@@ -479,7 +481,7 @@ EXPORT_SYMBOL(idr_remove_all);
 
 /**
  * idr_destroy - release all cached layers within an idr tree
- * idp: idr handle
+ * @idp: idr handle
  */
 void idr_destroy(struct idr *idp)
 {
@@ -586,10 +588,11 @@ EXPORT_SYMBOL(idr_for_each);
 /**
  * idr_get_next - lookup next object of id to given id.
  * @idp: idr handle
- * @id:  pointer to lookup key
+ * @nextidp:  pointer to lookup key
  *
  * Returns pointer to registered object with id, which is next number to
- * given id.
+ * given id. After being looked up, *@nextidp will be updated for the next
+ * iteration.
  */
 
 void *idr_get_next(struct idr *idp, int *nextidp)
@@ -758,7 +761,7 @@ EXPORT_SYMBOL(ida_pre_get);
 /**
  * ida_get_new_above - allocate new ID above or equal to a start id
  * @ida:	ida handle
- * @staring_id:	id to start search at
+ * @starting_id: id to start search at
  * @p_id:	pointer to the allocated handle
  *
  * Allocate new ID above or equal to @ida.  It should be called with
@@ -912,7 +915,7 @@ EXPORT_SYMBOL(ida_remove);
 
 /**
  * ida_destroy - release all cached layers within an ida tree
- * ida: ida handle
+ * @ida: ida handle
  */
 void ida_destroy(struct ida *ida)
 {
diff --git a/lib/kobject.c b/lib/kobject.c
index f07c57252e82..82dc34c095c2 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -746,17 +746,56 @@ void kset_unregister(struct kset *k)
  */
 struct kobject *kset_find_obj(struct kset *kset, const char *name)
 {
+	return kset_find_obj_hinted(kset, name, NULL);
+}
+
+/**
+ * kset_find_obj_hinted - search for object in kset given a predecessor hint.
+ * @kset:	kset we're looking in.
+ * @name:	object's name.
+ * @hint:	hint to possible object's predecessor.
+ *
+ * Check the hint's next object and if it is a match return it directly,
+ * otherwise, fall back to the behavior of kset_find_obj().  Either way
+ * a reference for the returned object is held and the reference on the
+ * hinted object is released.
+ */
+struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
+				     struct kobject *hint)
+{
 	struct kobject *k;
 	struct kobject *ret = NULL;
 
 	spin_lock(&kset->list_lock);
+
+	if (!hint)
+		goto slow_search;
+
+	/* end of list detection */
+	if (hint->entry.next == kset->list.next)
+		goto slow_search;
+
+	k = container_of(hint->entry.next, struct kobject, entry);
+	if (!kobject_name(k) || strcmp(kobject_name(k), name))
+		goto slow_search;
+
+	ret = kobject_get(k);
+	goto unlock_exit;
+
+slow_search:
 	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
 			ret = kobject_get(k);
 			break;
 		}
 	}
+
+unlock_exit:
 	spin_unlock(&kset->list_lock);
+
+	if (hint)
+		kobject_put(hint);
+
 	return ret;
 }
 
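
A sketch of the calling pattern the hint enables (hypothetical caller): when resolving names in roughly insertion order, each result can be fed back as the next hint, turning most lookups into a single list-next check. Note the reference discipline: kset_find_obj_hinted() consumes the reference on the hint and returns a referenced object (or NULL).

/* Hypothetical caller: resolve a batch of names, reusing each result as
 * the hint for the next lookup. */
static void lookup_batch(struct kset *kset, const char **names, int n)
{
	struct kobject *hint = NULL;
	int i;

	for (i = 0; i < n; i++) {
		struct kobject *k = kset_find_obj_hinted(kset, names[i], hint);

		hint = k;		/* our reference, consumed next round */
		if (!k)
			return;		/* previous hint ref already dropped */
		/* ... use k ... */
	}
	if (hint)
		kobject_put(hint);	/* drop the last lookup's reference */
}
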
diff --git a/lib/list_sort.c b/lib/list_sort.c
index a7616fa3162e..d7325c6b103f 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -141,77 +141,151 @@ void list_sort(void *priv, struct list_head *head,
 }
 EXPORT_SYMBOL(list_sort);
 
-#ifdef DEBUG_LIST_SORT
+#ifdef CONFIG_TEST_LIST_SORT
+
+#include <linux/random.h>
+
+/*
+ * The pattern of set bits in the list length determines which cases
+ * are hit in list_sort().
+ */
+#define TEST_LIST_LEN (512+128+2) /* not including head */
+
+#define TEST_POISON1 0xDEADBEEF
+#define TEST_POISON2 0xA324354C
+
 struct debug_el {
-	struct list_head l_h;
+	unsigned int poison1;
+	struct list_head list;
+	unsigned int poison2;
 	int value;
 	unsigned serial;
 };
 
-static int cmp(void *priv, struct list_head *a, struct list_head *b)
+/* Array, containing pointers to all elements in the test list */
+static struct debug_el **elts __initdata;
+
+static int __init check(struct debug_el *ela, struct debug_el *elb)
 {
-	return container_of(a, struct debug_el, l_h)->value
-		- container_of(b, struct debug_el, l_h)->value;
+	if (ela->serial >= TEST_LIST_LEN) {
+		printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+				ela->serial);
+		return -EINVAL;
+	}
+	if (elb->serial >= TEST_LIST_LEN) {
+		printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n",
+				elb->serial);
+		return -EINVAL;
+	}
+	if (elts[ela->serial] != ela || elts[elb->serial] != elb) {
+		printk(KERN_ERR "list_sort_test: error: phantom element\n");
+		return -EINVAL;
+	}
+	if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) {
+		printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+				ela->poison1, ela->poison2);
+		return -EINVAL;
+	}
+	if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) {
+		printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n",
+				elb->poison1, elb->poison2);
+		return -EINVAL;
+	}
+	return 0;
 }
 
-/*
- * The pattern of set bits in the list length determines which cases
- * are hit in list_sort().
- */
-#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */
+static int __init cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct debug_el *ela, *elb;
+
+	ela = container_of(a, struct debug_el, list);
+	elb = container_of(b, struct debug_el, list);
+
+	check(ela, elb);
+	return ela->value - elb->value;
+}
 
 static int __init list_sort_test(void)
 {
-	int i, r = 1, count;
-	struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL);
-	struct list_head *cur;
+	int i, count = 1, err = -EINVAL;
+	struct debug_el *el;
+	struct list_head *cur, *tmp;
+	LIST_HEAD(head);
+
+	printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n");
 
-	printk(KERN_WARNING "testing list_sort()\n");
+	elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL);
+	if (!elts) {
+		printk(KERN_ERR "list_sort_test: error: cannot allocate "
+				"memory\n");
+		goto exit;
+	}
 
-	cur = head;
-	for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) {
-		struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL);
-		BUG_ON(!el);
+	for (i = 0; i < TEST_LIST_LEN; i++) {
+		el = kmalloc(sizeof(*el), GFP_KERNEL);
+		if (!el) {
+			printk(KERN_ERR "list_sort_test: error: cannot "
+					"allocate memory\n");
+			goto exit;
+		}
 		/* force some equivalencies */
-		el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3);
+		el->value = random32() % (TEST_LIST_LEN/3);
 		el->serial = i;
-
-		el->l_h.prev = cur;
-		cur->next = &el->l_h;
-		cur = cur->next;
+		el->poison1 = TEST_POISON1;
+		el->poison2 = TEST_POISON2;
+		elts[i] = el;
+		list_add_tail(&el->list, &head);
 	}
-	head->prev = cur;
 
-	list_sort(NULL, head, cmp);
+	list_sort(NULL, &head, cmp);
+
+	for (cur = head.next; cur->next != &head; cur = cur->next) {
+		struct debug_el *el1;
+		int cmp_result;
 
-	count = 1;
-	for (cur = head->next; cur->next != head; cur = cur->next) {
-		struct debug_el *el = container_of(cur, struct debug_el, l_h);
-		int cmp_result = cmp(NULL, cur, cur->next);
 		if (cur->next->prev != cur) {
-			printk(KERN_EMERG "list_sort() returned "
-						"a corrupted list!\n");
-			return 1;
-		} else if (cmp_result > 0) {
-			printk(KERN_EMERG "list_sort() failed to sort!\n");
-			return 1;
-		} else if (cmp_result == 0 &&
-				el->serial >= container_of(cur->next,
-					struct debug_el, l_h)->serial) {
-			printk(KERN_EMERG "list_sort() failed to preserve order"
-						" of equivalent elements!\n");
-			return 1;
+			printk(KERN_ERR "list_sort_test: error: list is "
+					"corrupted\n");
+			goto exit;
+		}
+
+		cmp_result = cmp(NULL, cur, cur->next);
+		if (cmp_result > 0) {
+			printk(KERN_ERR "list_sort_test: error: list is not "
+					"sorted\n");
+			goto exit;
+		}
+
+		el = container_of(cur, struct debug_el, list);
+		el1 = container_of(cur->next, struct debug_el, list);
+		if (cmp_result == 0 && el->serial >= el1->serial) {
+			printk(KERN_ERR "list_sort_test: error: order of "
+					"equivalent elements not preserved\n");
+			goto exit;
+		}
+
+		if (check(el, el1)) {
+			printk(KERN_ERR "list_sort_test: error: element check "
+					"failed\n");
+			goto exit;
 		}
-		kfree(cur->prev);
 		count++;
 	}
-	kfree(cur);
-	if (count != LIST_SORT_TEST_LENGTH) {
-		printk(KERN_EMERG "list_sort() returned list of"
-					"different length!\n");
-		return 1;
+
+	if (count != TEST_LIST_LEN) {
+		printk(KERN_ERR "list_sort_test: error: bad list length %d",
+				count);
+		goto exit;
 	}
-	return 0;
+
+	err = 0;
+exit:
+	kfree(elts);
+	list_for_each_safe(cur, tmp, &head) {
+		list_del(cur);
+		kfree(container_of(cur, struct debug_el, list));
+	}
+	return err;
 }
 module_init(list_sort_test);
-#endif
+#endif /* CONFIG_TEST_LIST_SORT */
diff --git a/lib/parser.c b/lib/parser.c
index fb34977246bb..6e89eca5cca0 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -128,12 +128,13 @@ static int match_number(substring_t *s, int *result, int base)
 	char *endp;
 	char *buf;
 	int ret;
+	size_t len = s->to - s->from;
 
-	buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+	buf = kmalloc(len + 1, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
-	memcpy(buf, s->from, s->to - s->from);
-	buf[s->to - s->from] = '\0';
+	memcpy(buf, s->from, len);
+	buf[len] = '\0';
 	*result = simple_strtol(buf, &endp, base);
 	ret = 0;
 	if (endp == buf)
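
For context, match_number() backs the match_int()/match_octal()/match_hex() helpers used in mount-option parsing. A sketch of the typical calling pattern (the token table and option name are hypothetical):

#include <linux/parser.h>

/* Illustrative option parsing built on the parser.c helpers;
 * match_int() lands in the match_number() shown above. */
enum { Opt_size, Opt_err };

static const match_table_t tokens = {
	{ Opt_size, "size=%d" },
	{ Opt_err, NULL }
};

static int parse_size_opt(char *opt, int *size)
{
	substring_t args[MAX_OPT_ARGS];

	if (match_token(opt, tokens, args) != Opt_size)
		return -EINVAL;
	return match_int(&args[0], size);
}
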
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ec9048e74f44..604678d7d06d 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -8,10 +8,53 @@
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/debugobjects.h>
 
 static LIST_HEAD(percpu_counters);
 static DEFINE_MUTEX(percpu_counters_lock);
 
+#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
+
+static struct debug_obj_descr percpu_counter_debug_descr;
+
+static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
+{
+	struct percpu_counter *fbc = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		percpu_counter_destroy(fbc);
+		debug_object_free(fbc, &percpu_counter_debug_descr);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static struct debug_obj_descr percpu_counter_debug_descr = {
+	.name		= "percpu_counter",
+	.fixup_free	= percpu_counter_fixup_free,
+};
+
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{
+	debug_object_init(fbc, &percpu_counter_debug_descr);
+	debug_object_activate(fbc, &percpu_counter_debug_descr);
+}
+
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{
+	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
+	debug_object_free(fbc, &percpu_counter_debug_descr);
+}
+
+#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
+{ }
+static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
+{ }
+#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
+
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
 	int cpu;
@@ -30,9 +73,9 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
 	s32 *pcount;
-	int cpu = get_cpu();
 
-	pcount = per_cpu_ptr(fbc->counters, cpu);
+	preempt_disable();
+	pcount = this_cpu_ptr(fbc->counters);
 	count = *pcount + amount;
 	if (count >= batch || count <= -batch) {
 		spin_lock(&fbc->lock);
@@ -42,7 +85,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	} else {
 		*pcount = count;
 	}
-	put_cpu();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__percpu_counter_add);
 
@@ -75,7 +118,11 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 	fbc->counters = alloc_percpu(s32);
 	if (!fbc->counters)
 		return -ENOMEM;
+
+	debug_percpu_counter_activate(fbc);
+
 #ifdef CONFIG_HOTPLUG_CPU
+	INIT_LIST_HEAD(&fbc->list);
 	mutex_lock(&percpu_counters_lock);
 	list_add(&fbc->list, &percpu_counters);
 	mutex_unlock(&percpu_counters_lock);
@@ -89,6 +136,8 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	if (!fbc->counters)
 		return;
 
+	debug_percpu_counter_deactivate(fbc);
+
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
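
For context, the lifecycle these hooks now track; a minimal usage sketch ('nr_things' is a hypothetical counter):

#include <linux/percpu_counter.h>

/* Minimal percpu_counter lifecycle sketch. The debugobjects hooks above
 * are driven by init (activate) and destroy (deactivate), so a counter
 * freed while still active can now be caught and fixed up. */
static struct percpu_counter nr_things;

static int __init things_init(void)
{
	int err = percpu_counter_init(&nr_things, 0);

	if (err)
		return err;
	percpu_counter_inc(&nr_things);		/* cheap per-CPU fast path */
	pr_info("things: %lld\n", percpu_counter_sum(&nr_things));
	percpu_counter_destroy(&nr_things);
	return 0;
}
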
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index efd16fa80b1c..6f412ab4c24f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
 	unsigned int	height;		/* Height from the bottom */
 	unsigned int	count;
 	struct rcu_head	rcu_head;
-	void		*slots[RADIX_TREE_MAP_SIZE];
+	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 34e3082632d8..7c06ee51a29a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;
 
-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;
 
 /*
  * This is a free list describing the number of free entries available from
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
 	 * between io_tlb_start and io_tlb_end.
 	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 	for (i = 0; i < io_tlb_nslabs; i++)
 		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
 	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 
 	/*
 	 * Get the overflow emergency buffer
 	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
 	if (!io_tlb_overflow_buffer)
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 
@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
 				   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
 		free_bootmem_late(__pa(io_tlb_overflow_buffer),
-				  io_tlb_overflow);
+				  PAGE_ALIGN(io_tlb_overflow));
 		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  io_tlb_nslabs * sizeof(phys_addr_t));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_bootmem_late(__pa(io_tlb_list),
-				  io_tlb_nslabs * sizeof(int));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
 		free_bootmem_late(__pa(io_tlb_start),
-				  io_tlb_nslabs << IO_TLB_SHIFT);
+				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
 }
 
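
The point of the PAGE_ALIGN() additions is that the page-granular bootmem allocators hand out whole pages, so the size passed to free_bootmem_late() must match the page-rounded size actually reserved. A self-contained sketch of the round-up arithmetic (assuming 4 KiB pages; the kernel derives PAGE_SIZE per architecture):

#include <stdint.h>
#include <stdio.h>

/* Userspace sketch of the kernel's PAGE_ALIGN() round-up. */
#define SK_PAGE_SIZE	4096UL
#define SK_PAGE_ALIGN(x) (((x) + SK_PAGE_SIZE - 1) & ~(SK_PAGE_SIZE - 1))

int main(void)
{
	/* the 32 KiB overflow buffer is already page aligned... */
	printf("%lu\n", SK_PAGE_ALIGN(32UL * 1024));	/* 32768 */
	/* ...but io_tlb_nslabs * sizeof(int) usually is not */
	printf("%lu\n", SK_PAGE_ALIGN(262145UL));	/* 266240 */
	return 0;
}
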
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 7af9d841c43b..c150d3dafff4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -988,8 +988,15 @@ static noinline_for_stack
 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	      struct printf_spec spec)
 {
-	if (!ptr)
+	if (!ptr) {
+		/*
+		 * Print (null) with the same width as a pointer so it makes
+		 * tabular output look nice.
+		 */
+		if (spec.field_width == -1)
+			spec.field_width = 2 * sizeof(void *);
 		return string(buf, end, "(null)", spec);
+	}
 
 	switch (*fmt) {
 	case 'F':
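
With this change a NULL argument to %p pads like a real pointer. A small illustration (output shown for a 64-bit build, where 2 * sizeof(void *) == 16 columns; 'dev' is a hypothetical pointer):

printk(KERN_DEBUG "addr: %p end\n", dev);   /* addr: ffff880012345678 end */
printk(KERN_DEBUG "addr: %p end\n", NULL);  /* addr:           (null) end */
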
@@ -1031,7 +1038,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
-		spec.field_width = 2*sizeof(void *);
+		spec.field_width = 2 * sizeof(void *);
 		spec.flags |= ZEROPAD;
 	}
 	spec.base = 16;
@@ -1497,7 +1504,7 @@ EXPORT_SYMBOL(snprintf);
  * @...: Arguments for the format string
  *
  * The return value is the number of characters written into @buf not including
- * the trailing '\0'. If @size is <= 0 the function returns 0.
+ * the trailing '\0'. If @size is == 0 the function returns 0.
  */
 
 int scnprintf(char *buf, size_t size, const char *fmt, ...)
@@ -1509,7 +1516,11 @@ int scnprintf(char *buf, size_t size, const char *fmt, ...)
 	i = vsnprintf(buf, size, fmt, args);
 	va_end(args);
 
-	return (i >= size) ? (size - 1) : i;
+	if (likely(i < size))
+		return i;
+	if (size != 0)
+		return size - 1;
+	return 0;
 }
 EXPORT_SYMBOL(scnprintf);
 
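
The rewritten return path guards the size == 0 case, where the old `size - 1` underflowed the unsigned size_t and came back as -1. An illustration of the resulting contract:

/* scnprintf() returns what was actually written, never the would-be
 * length, and a zero-size buffer now yields 0 rather than -1. */
static void scnprintf_demo(void)
{
	char buf[8];
	int n;

	n = snprintf(buf, sizeof(buf), "%s", "0123456789");  /* n == 10 */
	n = scnprintf(buf, sizeof(buf), "%s", "0123456789"); /* n == 7 */
	n = scnprintf(buf, 0, "%s", "x");                    /* n == 0 */
}
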