aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Kconfig.debug33
-rw-r--r--lib/Makefile2
-rw-r--r--lib/argv_split.c13
-rw-r--r--lib/bitmap.c81
-rw-r--r--lib/checksum.c14
-rw-r--r--lib/crc32.c121
-rw-r--r--lib/ctype.c50
-rw-r--r--lib/debugobjects.c75
-rw-r--r--lib/decompress_bunzip2.c2
-rw-r--r--lib/decompress_inflate.c8
-rw-r--r--lib/decompress_unlzma.c10
-rw-r--r--lib/dma-debug.c8
-rw-r--r--lib/dynamic_debug.c4
-rw-r--r--lib/fault-inject.c1
-rw-r--r--lib/genalloc.c33
-rw-r--r--lib/idr.c4
-rw-r--r--lib/iommu-helper.c59
-rw-r--r--lib/kernel_lock.c46
-rw-r--r--lib/lru_cache.c560
-rw-r--r--lib/parser.c11
-rw-r--r--lib/plist.c8
-rw-r--r--lib/radix-tree.c5
-rw-r--r--lib/ratelimit.c45
-rw-r--r--lib/rwsem-spinlock.c23
-rw-r--r--lib/spinlock_debug.c64
-rw-r--r--lib/string.c45
-rw-r--r--lib/swiotlb.c52
-rw-r--r--lib/vsprintf.c509
29 files changed, 1330 insertions, 559 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index bb1326d3839c..1cfe51628e1b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -200,4 +200,7 @@ config NLATTR
200config GENERIC_ATOMIC64 200config GENERIC_ATOMIC64
201 bool 201 bool
202 202
203config LRU_CACHE
204 tristate
205
203endmenu 206endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d57b12f59c8c..8cf9938dd147 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -50,6 +50,14 @@ config MAGIC_SYSRQ
50 keys are documented in <file:Documentation/sysrq.txt>. Don't say Y 50 keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
51 unless you really know what this hack does. 51 unless you really know what this hack does.
52 52
53config STRIP_ASM_SYMS
54 bool "Strip assembler-generated symbols during link"
55 default n
56 help
57 Strip internal assembler-generated symbols during a link (symbols
58 that look like '.Lxxx') so they don't pollute the output of
59 get_wchan() and suchlike.
60
53config UNUSED_SYMBOLS 61config UNUSED_SYMBOLS
54 bool "Enable unused/obsolete exported symbols" 62 bool "Enable unused/obsolete exported symbols"
55 default y if X86 63 default y if X86
@@ -97,7 +105,7 @@ config DEBUG_SECTION_MISMATCH
97 bool "Enable full Section mismatch analysis" 105 bool "Enable full Section mismatch analysis"
98 depends on UNDEFINED 106 depends on UNDEFINED
99 # This option is on purpose disabled for now. 107 # This option is on purpose disabled for now.
100 # It will be enabled when we are down to a resonable number 108 # It will be enabled when we are down to a reasonable number
101 # of section mismatch warnings (< 10 for an allyesconfig build) 109 # of section mismatch warnings (< 10 for an allyesconfig build)
102 help 110 help
103 The section mismatch analysis checks if there are illegal 111 The section mismatch analysis checks if there are illegal
@@ -290,6 +298,14 @@ config DEBUG_OBJECTS_TIMERS
290 timer routines to track the life time of timer objects and 298 timer routines to track the life time of timer objects and
291 validate the timer operations. 299 validate the timer operations.
292 300
301config DEBUG_OBJECTS_WORK
302 bool "Debug work objects"
303 depends on DEBUG_OBJECTS
304 help
305 If you say Y here, additional code will be inserted into the
306 work queue routines to track the life time of work objects and
307 validate the work operations.
308
293config DEBUG_OBJECTS_ENABLE_DEFAULT 309config DEBUG_OBJECTS_ENABLE_DEFAULT
294 int "debug_objects bootup default value (0-1)" 310 int "debug_objects bootup default value (0-1)"
295 range 0 1 311 range 0 1
@@ -338,8 +354,9 @@ config SLUB_STATS
338 354
339config DEBUG_KMEMLEAK 355config DEBUG_KMEMLEAK
340 bool "Kernel memory leak detector" 356 bool "Kernel memory leak detector"
341 depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM || PPC) && \ 357 depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \
342 !MEMORY_HOTPLUG 358 (X86 || ARM || PPC || S390)
359
343 select DEBUG_FS if SYSFS 360 select DEBUG_FS if SYSFS
344 select STACKTRACE if STACKTRACE_SUPPORT 361 select STACKTRACE if STACKTRACE_SUPPORT
345 select KALLSYMS 362 select KALLSYMS
@@ -362,7 +379,7 @@ config DEBUG_KMEMLEAK
362config DEBUG_KMEMLEAK_EARLY_LOG_SIZE 379config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
363 int "Maximum kmemleak early log entries" 380 int "Maximum kmemleak early log entries"
364 depends on DEBUG_KMEMLEAK 381 depends on DEBUG_KMEMLEAK
365 range 200 2000 382 range 200 40000
366 default 400 383 default 400
367 help 384 help
368 Kmemleak must track all the memory allocations to avoid 385 Kmemleak must track all the memory allocations to avoid
@@ -383,7 +400,7 @@ config DEBUG_KMEMLEAK_TEST
383 400
384config DEBUG_PREEMPT 401config DEBUG_PREEMPT
385 bool "Debug preemptible kernel" 402 bool "Debug preemptible kernel"
386 depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64) 403 depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
387 default y 404 default y
388 help 405 help
389 If you say Y here then the kernel will use a debug variant of the 406 If you say Y here then the kernel will use a debug variant of the
@@ -558,7 +575,7 @@ config DEBUG_BUGVERBOSE
558 depends on BUG 575 depends on BUG
559 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \ 576 depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
560 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 577 FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
561 default !EMBEDDED 578 default y
562 help 579 help
563 Say Y here to make BUG() panics output the file name and line number 580 Say Y here to make BUG() panics output the file name and line number
564 of the BUG call as well as the EIP and oops trace. This aids 581 of the BUG call as well as the EIP and oops trace. This aids
@@ -741,7 +758,7 @@ config RCU_TORTURE_TEST_RUNNABLE
741config RCU_CPU_STALL_DETECTOR 758config RCU_CPU_STALL_DETECTOR
742 bool "Check for stalled CPUs delaying RCU grace periods" 759 bool "Check for stalled CPUs delaying RCU grace periods"
743 depends on TREE_RCU || TREE_PREEMPT_RCU 760 depends on TREE_RCU || TREE_PREEMPT_RCU
744 default n 761 default y
745 help 762 help
746 This option causes RCU to printk information on which 763 This option causes RCU to printk information on which
747 CPUs are delaying the current grace period, but only when 764 CPUs are delaying the current grace period, but only when
@@ -903,7 +920,7 @@ config LATENCYTOP
903 920
904config SYSCTL_SYSCALL_CHECK 921config SYSCTL_SYSCALL_CHECK
905 bool "Sysctl checks" 922 bool "Sysctl checks"
906 depends on SYSCTL_SYSCALL 923 depends on SYSCTL
907 ---help--- 924 ---help---
908 sys_sysctl uses binary paths that have been found challenging 925 sys_sysctl uses binary paths that have been found challenging
909 to properly maintain and use. This enables checks that help 926 to properly maintain and use. This enables checks that help
diff --git a/lib/Makefile b/lib/Makefile
index 2e78277eff9d..347ad8db29d3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -91,6 +91,8 @@ obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o
91 91
92obj-$(CONFIG_NLATTR) += nlattr.o 92obj-$(CONFIG_NLATTR) += nlattr.o
93 93
94obj-$(CONFIG_LRU_CACHE) += lru_cache.o
95
94obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o 96obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
95 97
96obj-$(CONFIG_GENERIC_CSUM) += checksum.o 98obj-$(CONFIG_GENERIC_CSUM) += checksum.o
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 5205a8dae5bc..4b1b083f219c 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -4,17 +4,10 @@
4 4
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/ctype.h> 6#include <linux/ctype.h>
7#include <linux/string.h>
7#include <linux/slab.h> 8#include <linux/slab.h>
8#include <linux/module.h> 9#include <linux/module.h>
9 10
10static const char *skip_sep(const char *cp)
11{
12 while (*cp && isspace(*cp))
13 cp++;
14
15 return cp;
16}
17
18static const char *skip_arg(const char *cp) 11static const char *skip_arg(const char *cp)
19{ 12{
20 while (*cp && !isspace(*cp)) 13 while (*cp && !isspace(*cp))
@@ -28,7 +21,7 @@ static int count_argc(const char *str)
28 int count = 0; 21 int count = 0;
29 22
30 while (*str) { 23 while (*str) {
31 str = skip_sep(str); 24 str = skip_spaces(str);
32 if (*str) { 25 if (*str) {
33 count++; 26 count++;
34 str = skip_arg(str); 27 str = skip_arg(str);
@@ -82,7 +75,7 @@ char **argv_split(gfp_t gfp, const char *str, int *argcp)
82 argvp = argv; 75 argvp = argv;
83 76
84 while (*str) { 77 while (*str) {
85 str = skip_sep(str); 78 str = skip_spaces(str);
86 79
87 if (*str) { 80 if (*str) {
88 const char *p = str; 81 const char *p = str;
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 702565821c99..11bf49750583 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -271,6 +271,87 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
271} 271}
272EXPORT_SYMBOL(__bitmap_weight); 272EXPORT_SYMBOL(__bitmap_weight);
273 273
274#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
275
276void bitmap_set(unsigned long *map, int start, int nr)
277{
278 unsigned long *p = map + BIT_WORD(start);
279 const int size = start + nr;
280 int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
281 unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
282
283 while (nr - bits_to_set >= 0) {
284 *p |= mask_to_set;
285 nr -= bits_to_set;
286 bits_to_set = BITS_PER_LONG;
287 mask_to_set = ~0UL;
288 p++;
289 }
290 if (nr) {
291 mask_to_set &= BITMAP_LAST_WORD_MASK(size);
292 *p |= mask_to_set;
293 }
294}
295EXPORT_SYMBOL(bitmap_set);
296
297void bitmap_clear(unsigned long *map, int start, int nr)
298{
299 unsigned long *p = map + BIT_WORD(start);
300 const int size = start + nr;
301 int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
302 unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
303
304 while (nr - bits_to_clear >= 0) {
305 *p &= ~mask_to_clear;
306 nr -= bits_to_clear;
307 bits_to_clear = BITS_PER_LONG;
308 mask_to_clear = ~0UL;
309 p++;
310 }
311 if (nr) {
312 mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
313 *p &= ~mask_to_clear;
314 }
315}
316EXPORT_SYMBOL(bitmap_clear);
317
318/*
319 * bitmap_find_next_zero_area - find a contiguous aligned zero area
320 * @map: The address to base the search on
321 * @size: The bitmap size in bits
322 * @start: The bitnumber to start searching at
323 * @nr: The number of zeroed bits we're looking for
324 * @align_mask: Alignment mask for zero area
325 *
326 * The @align_mask should be one less than a power of 2; the effect is that
327 * the bit offset of all zero areas this function finds is multiples of that
328 * power of 2. A @align_mask of 0 means no alignment is required.
329 */
330unsigned long bitmap_find_next_zero_area(unsigned long *map,
331 unsigned long size,
332 unsigned long start,
333 unsigned int nr,
334 unsigned long align_mask)
335{
336 unsigned long index, end, i;
337again:
338 index = find_next_zero_bit(map, size, start);
339
340 /* Align allocation */
341 index = __ALIGN_MASK(index, align_mask);
342
343 end = index + nr;
344 if (end > size)
345 return end;
346 i = find_next_bit(map, end, index);
347 if (i < end) {
348 start = i + 1;
349 goto again;
350 }
351 return index;
352}
353EXPORT_SYMBOL(bitmap_find_next_zero_area);
354
274/* 355/*
275 * Bitmap printing & parsing functions: first version by Bill Irwin, 356 * Bitmap printing & parsing functions: first version by Bill Irwin,
276 * second version by Paul Jackson, third by Joe Korty. 357 * second version by Paul Jackson, third by Joe Korty.
diff --git a/lib/checksum.c b/lib/checksum.c
index b2e2fd468461..097508732f34 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -37,7 +37,8 @@
37 37
38#include <asm/byteorder.h> 38#include <asm/byteorder.h>
39 39
40static inline unsigned short from32to16(unsigned long x) 40#ifndef do_csum
41static inline unsigned short from32to16(unsigned int x)
41{ 42{
42 /* add up 16-bit and 16-bit for 16+c bit */ 43 /* add up 16-bit and 16-bit for 16+c bit */
43 x = (x & 0xffff) + (x >> 16); 44 x = (x & 0xffff) + (x >> 16);
@@ -49,16 +50,16 @@ static inline unsigned short from32to16(unsigned long x)
49static unsigned int do_csum(const unsigned char *buff, int len) 50static unsigned int do_csum(const unsigned char *buff, int len)
50{ 51{
51 int odd, count; 52 int odd, count;
52 unsigned long result = 0; 53 unsigned int result = 0;
53 54
54 if (len <= 0) 55 if (len <= 0)
55 goto out; 56 goto out;
56 odd = 1 & (unsigned long) buff; 57 odd = 1 & (unsigned long) buff;
57 if (odd) { 58 if (odd) {
58#ifdef __LITTLE_ENDIAN 59#ifdef __LITTLE_ENDIAN
59 result = *buff;
60#else
61 result += (*buff << 8); 60 result += (*buff << 8);
61#else
62 result = *buff;
62#endif 63#endif
63 len--; 64 len--;
64 buff++; 65 buff++;
@@ -73,9 +74,9 @@ static unsigned int do_csum(const unsigned char *buff, int len)
73 } 74 }
74 count >>= 1; /* nr of 32-bit words.. */ 75 count >>= 1; /* nr of 32-bit words.. */
75 if (count) { 76 if (count) {
76 unsigned long carry = 0; 77 unsigned int carry = 0;
77 do { 78 do {
78 unsigned long w = *(unsigned int *) buff; 79 unsigned int w = *(unsigned int *) buff;
79 count--; 80 count--;
80 buff += 4; 81 buff += 4;
81 result += carry; 82 result += carry;
@@ -102,6 +103,7 @@ static unsigned int do_csum(const unsigned char *buff, int len)
102out: 103out:
103 return result; 104 return result;
104} 105}
106#endif
105 107
106/* 108/*
107 * This is a version of ip_compute_csum() optimized for IP headers, 109 * This is a version of ip_compute_csum() optimized for IP headers,
diff --git a/lib/crc32.c b/lib/crc32.c
index 49d1c9e3ce38..02e3b31b3a79 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -42,6 +42,48 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
42MODULE_DESCRIPTION("Ethernet CRC32 calculations"); 42MODULE_DESCRIPTION("Ethernet CRC32 calculations");
43MODULE_LICENSE("GPL"); 43MODULE_LICENSE("GPL");
44 44
45#if CRC_LE_BITS == 8 || CRC_BE_BITS == 8
46
47static inline u32
48crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab)
49{
50# ifdef __LITTLE_ENDIAN
51# define DO_CRC(x) crc = tab[(crc ^ (x)) & 255 ] ^ (crc >> 8)
52# else
53# define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8)
54# endif
55 const u32 *b = (const u32 *)buf;
56 size_t rem_len;
57
58 /* Align it */
59 if (unlikely((long)b & 3 && len)) {
60 u8 *p = (u8 *)b;
61 do {
62 DO_CRC(*p++);
63 } while ((--len) && ((long)p)&3);
64 b = (u32 *)p;
65 }
66 rem_len = len & 3;
67 /* load data 32 bits wide, xor data 32 bits wide. */
68 len = len >> 2;
69 for (--b; len; --len) {
70 crc ^= *++b; /* use pre increment for speed */
71 DO_CRC(0);
72 DO_CRC(0);
73 DO_CRC(0);
74 DO_CRC(0);
75 }
76 len = rem_len;
77 /* And the last few bytes */
78 if (len) {
79 u8 *p = (u8 *)(b + 1) - 1;
80 do {
81 DO_CRC(*++p); /* use pre increment for speed */
82 } while (--len);
83 }
84 return crc;
85}
86#endif
45/** 87/**
46 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 88 * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
47 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for 89 * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
@@ -72,48 +114,10 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
72u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) 114u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
73{ 115{
74# if CRC_LE_BITS == 8 116# if CRC_LE_BITS == 8
75 const u32 *b =(u32 *)p;
76 const u32 *tab = crc32table_le; 117 const u32 *tab = crc32table_le;
77 118
78# ifdef __LITTLE_ENDIAN
79# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
80# else
81# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
82# endif
83
84 crc = __cpu_to_le32(crc); 119 crc = __cpu_to_le32(crc);
85 /* Align it */ 120 crc = crc32_body(crc, p, len, tab);
86 if(unlikely(((long)b)&3 && len)){
87 do {
88 u8 *p = (u8 *)b;
89 DO_CRC(*p++);
90 b = (void *)p;
91 } while ((--len) && ((long)b)&3 );
92 }
93 if(likely(len >= 4)){
94 /* load data 32 bits wide, xor data 32 bits wide. */
95 size_t save_len = len & 3;
96 len = len >> 2;
97 --b; /* use pre increment below(*++b) for speed */
98 do {
99 crc ^= *++b;
100 DO_CRC(0);
101 DO_CRC(0);
102 DO_CRC(0);
103 DO_CRC(0);
104 } while (--len);
105 b++; /* point to next byte(s) */
106 len = save_len;
107 }
108 /* And the last few bytes */
109 if(len){
110 do {
111 u8 *p = (u8 *)b;
112 DO_CRC(*p++);
113 b = (void *)p;
114 } while (--len);
115 }
116
117 return __le32_to_cpu(crc); 121 return __le32_to_cpu(crc);
118#undef ENDIAN_SHIFT 122#undef ENDIAN_SHIFT
119#undef DO_CRC 123#undef DO_CRC
@@ -170,47 +174,10 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
170u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) 174u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
171{ 175{
172# if CRC_BE_BITS == 8 176# if CRC_BE_BITS == 8
173 const u32 *b =(u32 *)p;
174 const u32 *tab = crc32table_be; 177 const u32 *tab = crc32table_be;
175 178
176# ifdef __LITTLE_ENDIAN
177# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
178# else
179# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
180# endif
181
182 crc = __cpu_to_be32(crc); 179 crc = __cpu_to_be32(crc);
183 /* Align it */ 180 crc = crc32_body(crc, p, len, tab);
184 if(unlikely(((long)b)&3 && len)){
185 do {
186 u8 *p = (u8 *)b;
187 DO_CRC(*p++);
188 b = (u32 *)p;
189 } while ((--len) && ((long)b)&3 );
190 }
191 if(likely(len >= 4)){
192 /* load data 32 bits wide, xor data 32 bits wide. */
193 size_t save_len = len & 3;
194 len = len >> 2;
195 --b; /* use pre increment below(*++b) for speed */
196 do {
197 crc ^= *++b;
198 DO_CRC(0);
199 DO_CRC(0);
200 DO_CRC(0);
201 DO_CRC(0);
202 } while (--len);
203 b++; /* point to next byte(s) */
204 len = save_len;
205 }
206 /* And the last few bytes */
207 if(len){
208 do {
209 u8 *p = (u8 *)b;
210 DO_CRC(*p++);
211 b = (void *)p;
212 } while (--len);
213 }
214 return __be32_to_cpu(crc); 181 return __be32_to_cpu(crc);
215#undef ENDIAN_SHIFT 182#undef ENDIAN_SHIFT
216#undef DO_CRC 183#undef DO_CRC
diff --git a/lib/ctype.c b/lib/ctype.c
index d02ace14a322..26baa620e95b 100644
--- a/lib/ctype.c
+++ b/lib/ctype.c
@@ -7,30 +7,30 @@
7#include <linux/ctype.h> 7#include <linux/ctype.h>
8#include <linux/module.h> 8#include <linux/module.h>
9 9
10unsigned char _ctype[] = { 10const unsigned char _ctype[] = {
11_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */ 11_C,_C,_C,_C,_C,_C,_C,_C, /* 0-7 */
12_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */ 12_C,_C|_S,_C|_S,_C|_S,_C|_S,_C|_S,_C,_C, /* 8-15 */
13_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */ 13_C,_C,_C,_C,_C,_C,_C,_C, /* 16-23 */
14_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */ 14_C,_C,_C,_C,_C,_C,_C,_C, /* 24-31 */
15_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */ 15_S|_SP,_P,_P,_P,_P,_P,_P,_P, /* 32-39 */
16_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */ 16_P,_P,_P,_P,_P,_P,_P,_P, /* 40-47 */
17_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */ 17_D,_D,_D,_D,_D,_D,_D,_D, /* 48-55 */
18_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */ 18_D,_D,_P,_P,_P,_P,_P,_P, /* 56-63 */
19_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */ 19_P,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U|_X,_U, /* 64-71 */
20_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */ 20_U,_U,_U,_U,_U,_U,_U,_U, /* 72-79 */
21_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */ 21_U,_U,_U,_U,_U,_U,_U,_U, /* 80-87 */
22_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */ 22_U,_U,_U,_P,_P,_P,_P,_P, /* 88-95 */
23_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */ 23_P,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L|_X,_L, /* 96-103 */
24_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */ 24_L,_L,_L,_L,_L,_L,_L,_L, /* 104-111 */
25_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */ 25_L,_L,_L,_L,_L,_L,_L,_L, /* 112-119 */
26_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */ 26_L,_L,_L,_P,_P,_P,_P,_C, /* 120-127 */
270,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */ 270,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 128-143 */
280,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */ 280,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 144-159 */
29_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */ 29_S|_SP,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 160-175 */
30_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */ 30_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P,_P, /* 176-191 */
31_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */ 31_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U,_U, /* 192-207 */
32_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */ 32_U,_U,_U,_U,_U,_U,_U,_P,_U,_U,_U,_U,_U,_U,_U,_L, /* 208-223 */
33_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */ 33_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L,_L, /* 224-239 */
34_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */ 34_L,_L,_L,_L,_L,_L,_L,_P,_L,_L,_L,_L,_L,_L,_L,_L}; /* 240-255 */
35 35
36EXPORT_SYMBOL(_ctype); 36EXPORT_SYMBOL(_ctype);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2755a3bd16a1..a9a8996d286a 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/debugobjects.h> 10#include <linux/debugobjects.h>
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/sched.h>
12#include <linux/seq_file.h> 13#include <linux/seq_file.h>
13#include <linux/debugfs.h> 14#include <linux/debugfs.h>
14#include <linux/hash.h> 15#include <linux/hash.h>
@@ -25,14 +26,14 @@
25 26
26struct debug_bucket { 27struct debug_bucket {
27 struct hlist_head list; 28 struct hlist_head list;
28 spinlock_t lock; 29 raw_spinlock_t lock;
29}; 30};
30 31
31static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; 32static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
32 33
33static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; 34static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
34 35
35static DEFINE_SPINLOCK(pool_lock); 36static DEFINE_RAW_SPINLOCK(pool_lock);
36 37
37static HLIST_HEAD(obj_pool); 38static HLIST_HEAD(obj_pool);
38 39
@@ -95,10 +96,10 @@ static int fill_pool(void)
95 if (!new) 96 if (!new)
96 return obj_pool_free; 97 return obj_pool_free;
97 98
98 spin_lock_irqsave(&pool_lock, flags); 99 raw_spin_lock_irqsave(&pool_lock, flags);
99 hlist_add_head(&new->node, &obj_pool); 100 hlist_add_head(&new->node, &obj_pool);
100 obj_pool_free++; 101 obj_pool_free++;
101 spin_unlock_irqrestore(&pool_lock, flags); 102 raw_spin_unlock_irqrestore(&pool_lock, flags);
102 } 103 }
103 return obj_pool_free; 104 return obj_pool_free;
104} 105}
@@ -132,7 +133,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
132{ 133{
133 struct debug_obj *obj = NULL; 134 struct debug_obj *obj = NULL;
134 135
135 spin_lock(&pool_lock); 136 raw_spin_lock(&pool_lock);
136 if (obj_pool.first) { 137 if (obj_pool.first) {
137 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 138 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
138 139
@@ -151,7 +152,7 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
151 if (obj_pool_free < obj_pool_min_free) 152 if (obj_pool_free < obj_pool_min_free)
152 obj_pool_min_free = obj_pool_free; 153 obj_pool_min_free = obj_pool_free;
153 } 154 }
154 spin_unlock(&pool_lock); 155 raw_spin_unlock(&pool_lock);
155 156
156 return obj; 157 return obj;
157} 158}
@@ -164,7 +165,7 @@ static void free_obj_work(struct work_struct *work)
164 struct debug_obj *obj; 165 struct debug_obj *obj;
165 unsigned long flags; 166 unsigned long flags;
166 167
167 spin_lock_irqsave(&pool_lock, flags); 168 raw_spin_lock_irqsave(&pool_lock, flags);
168 while (obj_pool_free > ODEBUG_POOL_SIZE) { 169 while (obj_pool_free > ODEBUG_POOL_SIZE) {
169 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 170 obj = hlist_entry(obj_pool.first, typeof(*obj), node);
170 hlist_del(&obj->node); 171 hlist_del(&obj->node);
@@ -173,11 +174,11 @@ static void free_obj_work(struct work_struct *work)
173 * We release pool_lock across kmem_cache_free() to 174 * We release pool_lock across kmem_cache_free() to
174 * avoid contention on pool_lock. 175 * avoid contention on pool_lock.
175 */ 176 */
176 spin_unlock_irqrestore(&pool_lock, flags); 177 raw_spin_unlock_irqrestore(&pool_lock, flags);
177 kmem_cache_free(obj_cache, obj); 178 kmem_cache_free(obj_cache, obj);
178 spin_lock_irqsave(&pool_lock, flags); 179 raw_spin_lock_irqsave(&pool_lock, flags);
179 } 180 }
180 spin_unlock_irqrestore(&pool_lock, flags); 181 raw_spin_unlock_irqrestore(&pool_lock, flags);
181} 182}
182 183
183/* 184/*
@@ -189,7 +190,7 @@ static void free_object(struct debug_obj *obj)
189 unsigned long flags; 190 unsigned long flags;
190 int sched = 0; 191 int sched = 0;
191 192
192 spin_lock_irqsave(&pool_lock, flags); 193 raw_spin_lock_irqsave(&pool_lock, flags);
193 /* 194 /*
194 * schedule work when the pool is filled and the cache is 195 * schedule work when the pool is filled and the cache is
195 * initialized: 196 * initialized:
@@ -199,7 +200,7 @@ static void free_object(struct debug_obj *obj)
199 hlist_add_head(&obj->node, &obj_pool); 200 hlist_add_head(&obj->node, &obj_pool);
200 obj_pool_free++; 201 obj_pool_free++;
201 obj_pool_used--; 202 obj_pool_used--;
202 spin_unlock_irqrestore(&pool_lock, flags); 203 raw_spin_unlock_irqrestore(&pool_lock, flags);
203 if (sched) 204 if (sched)
204 schedule_work(&debug_obj_work); 205 schedule_work(&debug_obj_work);
205} 206}
@@ -220,9 +221,9 @@ static void debug_objects_oom(void)
220 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n"); 221 printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
221 222
222 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 223 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
223 spin_lock_irqsave(&db->lock, flags); 224 raw_spin_lock_irqsave(&db->lock, flags);
224 hlist_move_list(&db->list, &freelist); 225 hlist_move_list(&db->list, &freelist);
225 spin_unlock_irqrestore(&db->lock, flags); 226 raw_spin_unlock_irqrestore(&db->lock, flags);
226 227
227 /* Now free them */ 228 /* Now free them */
228 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 229 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -302,14 +303,14 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
302 303
303 db = get_bucket((unsigned long) addr); 304 db = get_bucket((unsigned long) addr);
304 305
305 spin_lock_irqsave(&db->lock, flags); 306 raw_spin_lock_irqsave(&db->lock, flags);
306 307
307 obj = lookup_object(addr, db); 308 obj = lookup_object(addr, db);
308 if (!obj) { 309 if (!obj) {
309 obj = alloc_object(addr, db, descr); 310 obj = alloc_object(addr, db, descr);
310 if (!obj) { 311 if (!obj) {
311 debug_objects_enabled = 0; 312 debug_objects_enabled = 0;
312 spin_unlock_irqrestore(&db->lock, flags); 313 raw_spin_unlock_irqrestore(&db->lock, flags);
313 debug_objects_oom(); 314 debug_objects_oom();
314 return; 315 return;
315 } 316 }
@@ -326,7 +327,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
326 case ODEBUG_STATE_ACTIVE: 327 case ODEBUG_STATE_ACTIVE:
327 debug_print_object(obj, "init"); 328 debug_print_object(obj, "init");
328 state = obj->state; 329 state = obj->state;
329 spin_unlock_irqrestore(&db->lock, flags); 330 raw_spin_unlock_irqrestore(&db->lock, flags);
330 debug_object_fixup(descr->fixup_init, addr, state); 331 debug_object_fixup(descr->fixup_init, addr, state);
331 return; 332 return;
332 333
@@ -337,7 +338,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
337 break; 338 break;
338 } 339 }
339 340
340 spin_unlock_irqrestore(&db->lock, flags); 341 raw_spin_unlock_irqrestore(&db->lock, flags);
341} 342}
342 343
343/** 344/**
@@ -384,7 +385,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
384 385
385 db = get_bucket((unsigned long) addr); 386 db = get_bucket((unsigned long) addr);
386 387
387 spin_lock_irqsave(&db->lock, flags); 388 raw_spin_lock_irqsave(&db->lock, flags);
388 389
389 obj = lookup_object(addr, db); 390 obj = lookup_object(addr, db);
390 if (obj) { 391 if (obj) {
@@ -397,7 +398,7 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
397 case ODEBUG_STATE_ACTIVE: 398 case ODEBUG_STATE_ACTIVE:
398 debug_print_object(obj, "activate"); 399 debug_print_object(obj, "activate");
399 state = obj->state; 400 state = obj->state;
400 spin_unlock_irqrestore(&db->lock, flags); 401 raw_spin_unlock_irqrestore(&db->lock, flags);
401 debug_object_fixup(descr->fixup_activate, addr, state); 402 debug_object_fixup(descr->fixup_activate, addr, state);
402 return; 403 return;
403 404
@@ -407,11 +408,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
407 default: 408 default:
408 break; 409 break;
409 } 410 }
410 spin_unlock_irqrestore(&db->lock, flags); 411 raw_spin_unlock_irqrestore(&db->lock, flags);
411 return; 412 return;
412 } 413 }
413 414
414 spin_unlock_irqrestore(&db->lock, flags); 415 raw_spin_unlock_irqrestore(&db->lock, flags);
415 /* 416 /*
416 * This happens when a static object is activated. We 417 * This happens when a static object is activated. We
417 * let the type specific code decide whether this is 418 * let the type specific code decide whether this is
@@ -437,7 +438,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
437 438
438 db = get_bucket((unsigned long) addr); 439 db = get_bucket((unsigned long) addr);
439 440
440 spin_lock_irqsave(&db->lock, flags); 441 raw_spin_lock_irqsave(&db->lock, flags);
441 442
442 obj = lookup_object(addr, db); 443 obj = lookup_object(addr, db);
443 if (obj) { 444 if (obj) {
@@ -462,7 +463,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
462 debug_print_object(&o, "deactivate"); 463 debug_print_object(&o, "deactivate");
463 } 464 }
464 465
465 spin_unlock_irqrestore(&db->lock, flags); 466 raw_spin_unlock_irqrestore(&db->lock, flags);
466} 467}
467 468
468/** 469/**
@@ -482,7 +483,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
482 483
483 db = get_bucket((unsigned long) addr); 484 db = get_bucket((unsigned long) addr);
484 485
485 spin_lock_irqsave(&db->lock, flags); 486 raw_spin_lock_irqsave(&db->lock, flags);
486 487
487 obj = lookup_object(addr, db); 488 obj = lookup_object(addr, db);
488 if (!obj) 489 if (!obj)
@@ -497,7 +498,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
497 case ODEBUG_STATE_ACTIVE: 498 case ODEBUG_STATE_ACTIVE:
498 debug_print_object(obj, "destroy"); 499 debug_print_object(obj, "destroy");
499 state = obj->state; 500 state = obj->state;
500 spin_unlock_irqrestore(&db->lock, flags); 501 raw_spin_unlock_irqrestore(&db->lock, flags);
501 debug_object_fixup(descr->fixup_destroy, addr, state); 502 debug_object_fixup(descr->fixup_destroy, addr, state);
502 return; 503 return;
503 504
@@ -508,7 +509,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
508 break; 509 break;
509 } 510 }
510out_unlock: 511out_unlock:
511 spin_unlock_irqrestore(&db->lock, flags); 512 raw_spin_unlock_irqrestore(&db->lock, flags);
512} 513}
513 514
514/** 515/**
@@ -528,7 +529,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
528 529
529 db = get_bucket((unsigned long) addr); 530 db = get_bucket((unsigned long) addr);
530 531
531 spin_lock_irqsave(&db->lock, flags); 532 raw_spin_lock_irqsave(&db->lock, flags);
532 533
533 obj = lookup_object(addr, db); 534 obj = lookup_object(addr, db);
534 if (!obj) 535 if (!obj)
@@ -538,17 +539,17 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
538 case ODEBUG_STATE_ACTIVE: 539 case ODEBUG_STATE_ACTIVE:
539 debug_print_object(obj, "free"); 540 debug_print_object(obj, "free");
540 state = obj->state; 541 state = obj->state;
541 spin_unlock_irqrestore(&db->lock, flags); 542 raw_spin_unlock_irqrestore(&db->lock, flags);
542 debug_object_fixup(descr->fixup_free, addr, state); 543 debug_object_fixup(descr->fixup_free, addr, state);
543 return; 544 return;
544 default: 545 default:
545 hlist_del(&obj->node); 546 hlist_del(&obj->node);
546 spin_unlock_irqrestore(&db->lock, flags); 547 raw_spin_unlock_irqrestore(&db->lock, flags);
547 free_object(obj); 548 free_object(obj);
548 return; 549 return;
549 } 550 }
550out_unlock: 551out_unlock:
551 spin_unlock_irqrestore(&db->lock, flags); 552 raw_spin_unlock_irqrestore(&db->lock, flags);
552} 553}
553 554
554#ifdef CONFIG_DEBUG_OBJECTS_FREE 555#ifdef CONFIG_DEBUG_OBJECTS_FREE
@@ -574,7 +575,7 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
574 575
575repeat: 576repeat:
576 cnt = 0; 577 cnt = 0;
577 spin_lock_irqsave(&db->lock, flags); 578 raw_spin_lock_irqsave(&db->lock, flags);
578 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { 579 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
579 cnt++; 580 cnt++;
580 oaddr = (unsigned long) obj->object; 581 oaddr = (unsigned long) obj->object;
@@ -586,7 +587,7 @@ repeat:
586 debug_print_object(obj, "free"); 587 debug_print_object(obj, "free");
587 descr = obj->descr; 588 descr = obj->descr;
588 state = obj->state; 589 state = obj->state;
589 spin_unlock_irqrestore(&db->lock, flags); 590 raw_spin_unlock_irqrestore(&db->lock, flags);
590 debug_object_fixup(descr->fixup_free, 591 debug_object_fixup(descr->fixup_free,
591 (void *) oaddr, state); 592 (void *) oaddr, state);
592 goto repeat; 593 goto repeat;
@@ -596,7 +597,7 @@ repeat:
596 break; 597 break;
597 } 598 }
598 } 599 }
599 spin_unlock_irqrestore(&db->lock, flags); 600 raw_spin_unlock_irqrestore(&db->lock, flags);
600 601
601 /* Now free them */ 602 /* Now free them */
602 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { 603 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) {
@@ -782,7 +783,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
782 783
783 db = get_bucket((unsigned long) addr); 784 db = get_bucket((unsigned long) addr);
784 785
785 spin_lock_irqsave(&db->lock, flags); 786 raw_spin_lock_irqsave(&db->lock, flags);
786 787
787 obj = lookup_object(addr, db); 788 obj = lookup_object(addr, db);
788 if (!obj && state != ODEBUG_STATE_NONE) { 789 if (!obj && state != ODEBUG_STATE_NONE) {
@@ -806,7 +807,7 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
806 } 807 }
807 res = 0; 808 res = 0;
808out: 809out:
809 spin_unlock_irqrestore(&db->lock, flags); 810 raw_spin_unlock_irqrestore(&db->lock, flags);
810 if (res) 811 if (res)
811 debug_objects_enabled = 0; 812 debug_objects_enabled = 0;
812 return res; 813 return res;
@@ -906,7 +907,7 @@ void __init debug_objects_early_init(void)
906 int i; 907 int i;
907 908
908 for (i = 0; i < ODEBUG_HASH_SIZE; i++) 909 for (i = 0; i < ODEBUG_HASH_SIZE; i++)
909 spin_lock_init(&obj_hash[i].lock); 910 raw_spin_lock_init(&obj_hash[i].lock);
910 911
911 for (i = 0; i < ODEBUG_POOL_SIZE; i++) 912 for (i = 0; i < ODEBUG_POOL_SIZE; i++)
912 hlist_add_head(&obj_static_pool[i].node, &obj_pool); 913 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 600f473a5610..76074209f9a2 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -299,7 +299,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
299 again when using them (during symbol decoding).*/ 299 again when using them (during symbol decoding).*/
300 base = hufGroup->base-1; 300 base = hufGroup->base-1;
301 limit = hufGroup->limit-1; 301 limit = hufGroup->limit-1;
302 /* Calculate permute[]. Concurently, initialize 302 /* Calculate permute[]. Concurrently, initialize
303 * temp[] and limit[]. */ 303 * temp[] and limit[]. */
304 pp = 0; 304 pp = 0;
305 for (i = minLen; i <= maxLen; i++) { 305 for (i = minLen; i <= maxLen; i++) {
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 68dfce59c1b8..fc686c7a0a0d 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -27,6 +27,11 @@
27 27
28#define GZIP_IOBUF_SIZE (16*1024) 28#define GZIP_IOBUF_SIZE (16*1024)
29 29
30static int nofill(void *buffer, unsigned int len)
31{
32 return -1;
33}
34
30/* Included from initramfs et al code */ 35/* Included from initramfs et al code */
31STATIC int INIT gunzip(unsigned char *buf, int len, 36STATIC int INIT gunzip(unsigned char *buf, int len,
32 int(*fill)(void*, unsigned int), 37 int(*fill)(void*, unsigned int),
@@ -76,6 +81,9 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
76 goto gunzip_nomem4; 81 goto gunzip_nomem4;
77 } 82 }
78 83
84 if (!fill)
85 fill = nofill;
86
79 if (len == 0) 87 if (len == 0)
80 len = fill(zbuf, GZIP_IOBUF_SIZE); 88 len = fill(zbuf, GZIP_IOBUF_SIZE);
81 89
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 0b954e04bd30..ca82fde81c8f 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -82,6 +82,11 @@ struct rc {
82#define RC_MODEL_TOTAL_BITS 11 82#define RC_MODEL_TOTAL_BITS 11
83 83
84 84
85static int nofill(void *buffer, unsigned int len)
86{
87 return -1;
88}
89
85/* Called twice: once at startup and once in rc_normalize() */ 90/* Called twice: once at startup and once in rc_normalize() */
86static void INIT rc_read(struct rc *rc) 91static void INIT rc_read(struct rc *rc)
87{ 92{
@@ -97,7 +102,10 @@ static inline void INIT rc_init(struct rc *rc,
97 int (*fill)(void*, unsigned int), 102 int (*fill)(void*, unsigned int),
98 char *buffer, int buffer_size) 103 char *buffer, int buffer_size)
99{ 104{
100 rc->fill = fill; 105 if (fill)
106 rc->fill = fill;
107 else
108 rc->fill = nofill;
101 rc->buffer = (uint8_t *)buffer; 109 rc->buffer = (uint8_t *)buffer;
102 rc->buffer_size = buffer_size; 110 rc->buffer_size = buffer_size;
103 rc->buffer_end = rc->buffer + rc->buffer_size; 111 rc->buffer_end = rc->buffer + rc->buffer_size;
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 58a9f9fc609a..d9b08e0f7f55 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -259,7 +259,7 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
259 * times. Without a hardware IOMMU this results in the 259 * times. Without a hardware IOMMU this results in the
260 * same device addresses being put into the dma-debug 260 * same device addresses being put into the dma-debug
261 * hash multiple times too. This can result in false 261 * hash multiple times too. This can result in false
262 * positives being reported. Therfore we implement a 262 * positives being reported. Therefore we implement a
263 * best-fit algorithm here which returns the entry from 263 * best-fit algorithm here which returns the entry from
264 * the hash which fits best to the reference value 264 * the hash which fits best to the reference value
265 * instead of the first-fit. 265 * instead of the first-fit.
@@ -819,9 +819,11 @@ static void check_unmap(struct dma_debug_entry *ref)
819 err_printk(ref->dev, entry, "DMA-API: device driver frees " 819 err_printk(ref->dev, entry, "DMA-API: device driver frees "
820 "DMA memory with different CPU address " 820 "DMA memory with different CPU address "
821 "[device address=0x%016llx] [size=%llu bytes] " 821 "[device address=0x%016llx] [size=%llu bytes] "
822 "[cpu alloc address=%p] [cpu free address=%p]", 822 "[cpu alloc address=0x%016llx] "
823 "[cpu free address=0x%016llx]",
823 ref->dev_addr, ref->size, 824 ref->dev_addr, ref->size,
824 (void *)entry->paddr, (void *)ref->paddr); 825 (unsigned long long)entry->paddr,
826 (unsigned long long)ref->paddr);
825 } 827 }
826 828
827 if (ref->sg_call_ents && ref->type == dma_debug_sg && 829 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index e22c148e4b7f..f93502915988 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -21,6 +21,7 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/ctype.h> 23#include <linux/ctype.h>
24#include <linux/string.h>
24#include <linux/uaccess.h> 25#include <linux/uaccess.h>
25#include <linux/dynamic_debug.h> 26#include <linux/dynamic_debug.h>
26#include <linux/debugfs.h> 27#include <linux/debugfs.h>
@@ -209,8 +210,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
209 char *end; 210 char *end;
210 211
211 /* Skip leading whitespace */ 212 /* Skip leading whitespace */
212 while (*buf && isspace(*buf)) 213 buf = skip_spaces(buf);
213 buf++;
214 if (!*buf) 214 if (!*buf)
215 break; /* oh, it was trailing whitespace */ 215 break; /* oh, it was trailing whitespace */
216 216
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f97af55bdd96..7e65af70635e 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -1,6 +1,7 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/init.h> 2#include <linux/init.h>
3#include <linux/random.h> 3#include <linux/random.h>
4#include <linux/sched.h>
4#include <linux/stat.h> 5#include <linux/stat.h>
5#include <linux/types.h> 6#include <linux/types.h>
6#include <linux/fs.h> 7#include <linux/fs.h>
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eed2bdb865e7..e67f97495dd5 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/bitmap.h>
14#include <linux/genalloc.h> 15#include <linux/genalloc.h>
15 16
16 17
@@ -114,7 +115,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
114 struct gen_pool_chunk *chunk; 115 struct gen_pool_chunk *chunk;
115 unsigned long addr, flags; 116 unsigned long addr, flags;
116 int order = pool->min_alloc_order; 117 int order = pool->min_alloc_order;
117 int nbits, bit, start_bit, end_bit; 118 int nbits, start_bit, end_bit;
118 119
119 if (size == 0) 120 if (size == 0)
120 return 0; 121 return 0;
@@ -129,29 +130,19 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
129 end_bit -= nbits + 1; 130 end_bit -= nbits + 1;
130 131
131 spin_lock_irqsave(&chunk->lock, flags); 132 spin_lock_irqsave(&chunk->lock, flags);
132 bit = -1; 133 start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0,
133 while (bit + 1 < end_bit) { 134 nbits, 0);
134 bit = find_next_zero_bit(chunk->bits, end_bit, bit + 1); 135 if (start_bit >= end_bit) {
135 if (bit >= end_bit)
136 break;
137
138 start_bit = bit;
139 if (nbits > 1) {
140 bit = find_next_bit(chunk->bits, bit + nbits,
141 bit + 1);
142 if (bit - start_bit < nbits)
143 continue;
144 }
145
146 addr = chunk->start_addr +
147 ((unsigned long)start_bit << order);
148 while (nbits--)
149 __set_bit(start_bit++, chunk->bits);
150 spin_unlock_irqrestore(&chunk->lock, flags); 136 spin_unlock_irqrestore(&chunk->lock, flags);
151 read_unlock(&pool->lock); 137 continue;
152 return addr;
153 } 138 }
139
140 addr = chunk->start_addr + ((unsigned long)start_bit << order);
141
142 bitmap_set(chunk->bits, start_bit, nbits);
154 spin_unlock_irqrestore(&chunk->lock, flags); 143 spin_unlock_irqrestore(&chunk->lock, flags);
144 read_unlock(&pool->lock);
145 return addr;
155 } 146 }
156 read_unlock(&pool->lock); 147 read_unlock(&pool->lock);
157 return 0; 148 return 0;
diff --git a/lib/idr.c b/lib/idr.c
index 80ca9aca038b..1cac726c44bc 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -281,7 +281,7 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
281/** 281/**
282 * idr_get_new_above - allocate new idr entry above or equal to a start id 282 * idr_get_new_above - allocate new idr entry above or equal to a start id
283 * @idp: idr handle 283 * @idp: idr handle
284 * @ptr: pointer you want associated with the ide 284 * @ptr: pointer you want associated with the id
285 * @start_id: id to start search at 285 * @start_id: id to start search at
286 * @id: pointer to the allocated handle 286 * @id: pointer to the allocated handle
287 * 287 *
@@ -313,7 +313,7 @@ EXPORT_SYMBOL(idr_get_new_above);
313/** 313/**
314 * idr_get_new - allocate new idr entry 314 * idr_get_new - allocate new idr entry
315 * @idp: idr handle 315 * @idp: idr handle
316 * @ptr: pointer you want associated with the ide 316 * @ptr: pointer you want associated with the id
317 * @id: pointer to the allocated handle 317 * @id: pointer to the allocated handle
318 * 318 *
319 * This is the allocate id function. It should be called with any 319 * This is the allocate id function. It should be called with any
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index 75dbda03f4fb..c0251f4ad08b 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -3,41 +3,7 @@
3 */ 3 */
4 4
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/bitops.h> 6#include <linux/bitmap.h>
7
8static unsigned long find_next_zero_area(unsigned long *map,
9 unsigned long size,
10 unsigned long start,
11 unsigned int nr,
12 unsigned long align_mask)
13{
14 unsigned long index, end, i;
15again:
16 index = find_next_zero_bit(map, size, start);
17
18 /* Align allocation */
19 index = (index + align_mask) & ~align_mask;
20
21 end = index + nr;
22 if (end >= size)
23 return -1;
24 for (i = index; i < end; i++) {
25 if (test_bit(i, map)) {
26 start = i+1;
27 goto again;
28 }
29 }
30 return index;
31}
32
33void iommu_area_reserve(unsigned long *map, unsigned long i, int len)
34{
35 unsigned long end = i + len;
36 while (i < end) {
37 __set_bit(i, map);
38 i++;
39 }
40}
41 7
42int iommu_is_span_boundary(unsigned int index, unsigned int nr, 8int iommu_is_span_boundary(unsigned int index, unsigned int nr,
43 unsigned long shift, 9 unsigned long shift,
@@ -55,31 +21,24 @@ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
55 unsigned long align_mask) 21 unsigned long align_mask)
56{ 22{
57 unsigned long index; 23 unsigned long index;
24
25 /* We don't want the last of the limit */
26 size -= 1;
58again: 27again:
59 index = find_next_zero_area(map, size, start, nr, align_mask); 28 index = bitmap_find_next_zero_area(map, size, start, nr, align_mask);
60 if (index != -1) { 29 if (index < size) {
61 if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { 30 if (iommu_is_span_boundary(index, nr, shift, boundary_size)) {
62 /* we could do more effectively */ 31 /* we could do more effectively */
63 start = index + 1; 32 start = index + 1;
64 goto again; 33 goto again;
65 } 34 }
66 iommu_area_reserve(map, index, nr); 35 bitmap_set(map, index, nr);
36 return index;
67 } 37 }
68 return index; 38 return -1;
69} 39}
70EXPORT_SYMBOL(iommu_area_alloc); 40EXPORT_SYMBOL(iommu_area_alloc);
71 41
72void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
73{
74 unsigned long end = start + nr;
75
76 while (start < end) {
77 __clear_bit(start, map);
78 start++;
79 }
80}
81EXPORT_SYMBOL(iommu_area_free);
82
83unsigned long iommu_num_pages(unsigned long addr, unsigned long len, 42unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
84 unsigned long io_page_size) 43 unsigned long io_page_size)
85{ 44{
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 39f1029e3525..b135d04aa48a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -5,10 +5,13 @@
5 * relegated to obsolescence, but used by various less 5 * relegated to obsolescence, but used by various less
6 * important (or lazy) subsystems. 6 * important (or lazy) subsystems.
7 */ 7 */
8#include <linux/smp_lock.h>
9#include <linux/module.h> 8#include <linux/module.h>
10#include <linux/kallsyms.h> 9#include <linux/kallsyms.h>
11#include <linux/semaphore.h> 10#include <linux/semaphore.h>
11#include <linux/smp_lock.h>
12
13#define CREATE_TRACE_POINTS
14#include <trace/events/bkl.h>
12 15
13/* 16/*
14 * The 'big kernel lock' 17 * The 'big kernel lock'
@@ -20,7 +23,7 @@
20 * 23 *
21 * Don't use in new code. 24 * Don't use in new code.
22 */ 25 */
23static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); 26static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);
24 27
25 28
26/* 29/*
@@ -33,12 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
33 * If it successfully gets the lock, it should increment 36 * If it successfully gets the lock, it should increment
34 * the preemption count like any spinlock does. 37 * the preemption count like any spinlock does.
35 * 38 *
36 * (This works on UP too - _raw_spin_trylock will never 39 * (This works on UP too - do_raw_spin_trylock will never
37 * return false in that case) 40 * return false in that case)
38 */ 41 */
39int __lockfunc __reacquire_kernel_lock(void) 42int __lockfunc __reacquire_kernel_lock(void)
40{ 43{
41 while (!_raw_spin_trylock(&kernel_flag)) { 44 while (!do_raw_spin_trylock(&kernel_flag)) {
42 if (need_resched()) 45 if (need_resched())
43 return -EAGAIN; 46 return -EAGAIN;
44 cpu_relax(); 47 cpu_relax();
@@ -49,27 +52,27 @@ int __lockfunc __reacquire_kernel_lock(void)
49 52
50void __lockfunc __release_kernel_lock(void) 53void __lockfunc __release_kernel_lock(void)
51{ 54{
52 _raw_spin_unlock(&kernel_flag); 55 do_raw_spin_unlock(&kernel_flag);
53 preempt_enable_no_resched(); 56 preempt_enable_no_resched();
54} 57}
55 58
56/* 59/*
57 * These are the BKL spinlocks - we try to be polite about preemption. 60 * These are the BKL spinlocks - we try to be polite about preemption.
58 * If SMP is not on (ie UP preemption), this all goes away because the 61 * If SMP is not on (ie UP preemption), this all goes away because the
59 * _raw_spin_trylock() will always succeed. 62 * do_raw_spin_trylock() will always succeed.
60 */ 63 */
61#ifdef CONFIG_PREEMPT 64#ifdef CONFIG_PREEMPT
62static inline void __lock_kernel(void) 65static inline void __lock_kernel(void)
63{ 66{
64 preempt_disable(); 67 preempt_disable();
65 if (unlikely(!_raw_spin_trylock(&kernel_flag))) { 68 if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
66 /* 69 /*
67 * If preemption was disabled even before this 70 * If preemption was disabled even before this
68 * was called, there's nothing we can be polite 71 * was called, there's nothing we can be polite
69 * about - just spin. 72 * about - just spin.
70 */ 73 */
71 if (preempt_count() > 1) { 74 if (preempt_count() > 1) {
72 _raw_spin_lock(&kernel_flag); 75 do_raw_spin_lock(&kernel_flag);
73 return; 76 return;
74 } 77 }
75 78
@@ -79,10 +82,10 @@ static inline void __lock_kernel(void)
79 */ 82 */
80 do { 83 do {
81 preempt_enable(); 84 preempt_enable();
82 while (spin_is_locked(&kernel_flag)) 85 while (raw_spin_is_locked(&kernel_flag))
83 cpu_relax(); 86 cpu_relax();
84 preempt_disable(); 87 preempt_disable();
85 } while (!_raw_spin_trylock(&kernel_flag)); 88 } while (!do_raw_spin_trylock(&kernel_flag));
86 } 89 }
87} 90}
88 91
@@ -93,7 +96,7 @@ static inline void __lock_kernel(void)
93 */ 96 */
94static inline void __lock_kernel(void) 97static inline void __lock_kernel(void)
95{ 98{
96 _raw_spin_lock(&kernel_flag); 99 do_raw_spin_lock(&kernel_flag);
97} 100}
98#endif 101#endif
99 102
@@ -103,7 +106,7 @@ static inline void __unlock_kernel(void)
103 * the BKL is not covered by lockdep, so we open-code the 106 * the BKL is not covered by lockdep, so we open-code the
104 * unlocking sequence (and thus avoid the dep-chain ops): 107 * unlocking sequence (and thus avoid the dep-chain ops):
105 */ 108 */
106 _raw_spin_unlock(&kernel_flag); 109 do_raw_spin_unlock(&kernel_flag);
107 preempt_enable(); 110 preempt_enable();
108} 111}
109 112
@@ -113,21 +116,28 @@ static inline void __unlock_kernel(void)
113 * This cannot happen asynchronously, so we only need to 116 * This cannot happen asynchronously, so we only need to
114 * worry about other CPU's. 117 * worry about other CPU's.
115 */ 118 */
116void __lockfunc lock_kernel(void) 119void __lockfunc _lock_kernel(const char *func, const char *file, int line)
117{ 120{
118 int depth = current->lock_depth+1; 121 int depth = current->lock_depth + 1;
119 if (likely(!depth)) 122
123 trace_lock_kernel(func, file, line);
124
125 if (likely(!depth)) {
126 might_sleep();
120 __lock_kernel(); 127 __lock_kernel();
128 }
121 current->lock_depth = depth; 129 current->lock_depth = depth;
122} 130}
123 131
124void __lockfunc unlock_kernel(void) 132void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
125{ 133{
126 BUG_ON(current->lock_depth < 0); 134 BUG_ON(current->lock_depth < 0);
127 if (likely(--current->lock_depth < 0)) 135 if (likely(--current->lock_depth < 0))
128 __unlock_kernel(); 136 __unlock_kernel();
137
138 trace_unlock_kernel(func, file, line);
129} 139}
130 140
131EXPORT_SYMBOL(lock_kernel); 141EXPORT_SYMBOL(_lock_kernel);
132EXPORT_SYMBOL(unlock_kernel); 142EXPORT_SYMBOL(_unlock_kernel);
133 143
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
new file mode 100644
index 000000000000..270de9d31b8c
--- /dev/null
+++ b/lib/lru_cache.c
@@ -0,0 +1,560 @@
1/*
2 lru_cache.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24 */
25
26#include <linux/module.h>
27#include <linux/bitops.h>
28#include <linux/slab.h>
29#include <linux/string.h> /* for memset */
30#include <linux/seq_file.h> /* for seq_printf */
31#include <linux/lru_cache.h>
32
33MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
34 "Lars Ellenberg <lars@linbit.com>");
35MODULE_DESCRIPTION("lru_cache - Track sets of hot objects");
36MODULE_LICENSE("GPL");
37
38/* this is developers aid only.
39 * it catches concurrent access (lack of locking on the users part) */
40#define PARANOIA_ENTRY() do { \
41 BUG_ON(!lc); \
42 BUG_ON(!lc->nr_elements); \
43 BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
44} while (0)
45
46#define RETURN(x...) do { \
47 clear_bit(__LC_PARANOIA, &lc->flags); \
48 smp_mb__after_clear_bit(); return x ; } while (0)
49
50/* BUG() if e is not one of the elements tracked by lc */
51#define PARANOIA_LC_ELEMENT(lc, e) do { \
52 struct lru_cache *lc_ = (lc); \
53 struct lc_element *e_ = (e); \
54 unsigned i = e_->lc_index; \
55 BUG_ON(i >= lc_->nr_elements); \
56 BUG_ON(lc_->lc_element[i] != e_); } while (0)
57
58/**
59 * lc_create - prepares to track objects in an active set
60 * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
61 * @e_count: number of elements allowed to be active simultaneously
62 * @e_size: size of the tracked objects
63 * @e_off: offset to the &struct lc_element member in a tracked object
64 *
65 * Returns a pointer to a newly initialized struct lru_cache on success,
66 * or NULL on (allocation) failure.
67 */
68struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
69 unsigned e_count, size_t e_size, size_t e_off)
70{
71 struct hlist_head *slot = NULL;
72 struct lc_element **element = NULL;
73 struct lru_cache *lc;
74 struct lc_element *e;
75 unsigned cache_obj_size = kmem_cache_size(cache);
76 unsigned i;
77
78 WARN_ON(cache_obj_size < e_size);
79 if (cache_obj_size < e_size)
80 return NULL;
81
82 /* e_count too big; would probably fail the allocation below anyways.
83 * for typical use cases, e_count should be few thousand at most. */
84 if (e_count > LC_MAX_ACTIVE)
85 return NULL;
86
87 slot = kzalloc(e_count * sizeof(struct hlist_head*), GFP_KERNEL);
88 if (!slot)
89 goto out_fail;
90 element = kzalloc(e_count * sizeof(struct lc_element *), GFP_KERNEL);
91 if (!element)
92 goto out_fail;
93
94 lc = kzalloc(sizeof(*lc), GFP_KERNEL);
95 if (!lc)
96 goto out_fail;
97
98 INIT_LIST_HEAD(&lc->in_use);
99 INIT_LIST_HEAD(&lc->lru);
100 INIT_LIST_HEAD(&lc->free);
101
102 lc->name = name;
103 lc->element_size = e_size;
104 lc->element_off = e_off;
105 lc->nr_elements = e_count;
106 lc->new_number = LC_FREE;
107 lc->lc_cache = cache;
108 lc->lc_element = element;
109 lc->lc_slot = slot;
110
111 /* preallocate all objects */
112 for (i = 0; i < e_count; i++) {
113 void *p = kmem_cache_alloc(cache, GFP_KERNEL);
114 if (!p)
115 break;
116 memset(p, 0, lc->element_size);
117 e = p + e_off;
118 e->lc_index = i;
119 e->lc_number = LC_FREE;
120 list_add(&e->list, &lc->free);
121 element[i] = e;
122 }
123 if (i == e_count)
124 return lc;
125
126 /* else: could not allocate all elements, give up */
127 for (i--; i; i--) {
128 void *p = element[i];
129 kmem_cache_free(cache, p - e_off);
130 }
131 kfree(lc);
132out_fail:
133 kfree(element);
134 kfree(slot);
135 return NULL;
136}
137
138void lc_free_by_index(struct lru_cache *lc, unsigned i)
139{
140 void *p = lc->lc_element[i];
141 WARN_ON(!p);
142 if (p) {
143 p -= lc->element_off;
144 kmem_cache_free(lc->lc_cache, p);
145 }
146}
147
148/**
149 * lc_destroy - frees memory allocated by lc_create()
150 * @lc: the lru cache to destroy
151 */
152void lc_destroy(struct lru_cache *lc)
153{
154 unsigned i;
155 if (!lc)
156 return;
157 for (i = 0; i < lc->nr_elements; i++)
158 lc_free_by_index(lc, i);
159 kfree(lc->lc_element);
160 kfree(lc->lc_slot);
161 kfree(lc);
162}
163
164/**
165 * lc_reset - does a full reset for @lc and the hash table slots.
166 * @lc: the lru cache to operate on
167 *
168 * It is roughly the equivalent of re-allocating a fresh lru_cache object,
169 * basically a short cut to lc_destroy(lc); lc = lc_create(...);
170 */
171void lc_reset(struct lru_cache *lc)
172{
173 unsigned i;
174
175 INIT_LIST_HEAD(&lc->in_use);
176 INIT_LIST_HEAD(&lc->lru);
177 INIT_LIST_HEAD(&lc->free);
178 lc->used = 0;
179 lc->hits = 0;
180 lc->misses = 0;
181 lc->starving = 0;
182 lc->dirty = 0;
183 lc->changed = 0;
184 lc->flags = 0;
185 lc->changing_element = NULL;
186 lc->new_number = LC_FREE;
187 memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);
188
189 for (i = 0; i < lc->nr_elements; i++) {
190 struct lc_element *e = lc->lc_element[i];
191 void *p = e;
192 p -= lc->element_off;
193 memset(p, 0, lc->element_size);
194 /* re-init it */
195 e->lc_index = i;
196 e->lc_number = LC_FREE;
197 list_add(&e->list, &lc->free);
198 }
199}
200
201/**
202 * lc_seq_printf_stats - print stats about @lc into @seq
203 * @seq: the seq_file to print into
204 * @lc: the lru cache to print statistics of
205 */
206size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
207{
208 /* NOTE:
209 * total calls to lc_get are
210 * (starving + hits + misses)
211 * misses include "dirty" count (update from an other thread in
212 * progress) and "changed", when this in fact lead to an successful
213 * update of the cache.
214 */
215 return seq_printf(seq, "\t%s: used:%u/%u "
216 "hits:%lu misses:%lu starving:%lu dirty:%lu changed:%lu\n",
217 lc->name, lc->used, lc->nr_elements,
218 lc->hits, lc->misses, lc->starving, lc->dirty, lc->changed);
219}
220
221static struct hlist_head *lc_hash_slot(struct lru_cache *lc, unsigned int enr)
222{
223 return lc->lc_slot + (enr % lc->nr_elements);
224}
225
226
227/**
228 * lc_find - find element by label, if present in the hash table
229 * @lc: The lru_cache object
230 * @enr: element number
231 *
232 * Returns the pointer to an element, if the element with the requested
233 * "label" or element number is present in the hash table,
234 * or NULL if not found. Does not change the refcnt.
235 */
236struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
237{
238 struct hlist_node *n;
239 struct lc_element *e;
240
241 BUG_ON(!lc);
242 BUG_ON(!lc->nr_elements);
243 hlist_for_each_entry(e, n, lc_hash_slot(lc, enr), colision) {
244 if (e->lc_number == enr)
245 return e;
246 }
247 return NULL;
248}
249
250/* returned element will be "recycled" immediately */
251static struct lc_element *lc_evict(struct lru_cache *lc)
252{
253 struct list_head *n;
254 struct lc_element *e;
255
256 if (list_empty(&lc->lru))
257 return NULL;
258
259 n = lc->lru.prev;
260 e = list_entry(n, struct lc_element, list);
261
262 PARANOIA_LC_ELEMENT(lc, e);
263
264 list_del(&e->list);
265 hlist_del(&e->colision);
266 return e;
267}
268
269/**
270 * lc_del - removes an element from the cache
271 * @lc: The lru_cache object
272 * @e: The element to remove
273 *
274 * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
275 * sets @e->enr to %LC_FREE.
276 */
277void lc_del(struct lru_cache *lc, struct lc_element *e)
278{
279 PARANOIA_ENTRY();
280 PARANOIA_LC_ELEMENT(lc, e);
281 BUG_ON(e->refcnt);
282
283 e->lc_number = LC_FREE;
284 hlist_del_init(&e->colision);
285 list_move(&e->list, &lc->free);
286 RETURN();
287}
288
289static struct lc_element *lc_get_unused_element(struct lru_cache *lc)
290{
291 struct list_head *n;
292
293 if (list_empty(&lc->free))
294 return lc_evict(lc);
295
296 n = lc->free.next;
297 list_del(n);
298 return list_entry(n, struct lc_element, list);
299}
300
301static int lc_unused_element_available(struct lru_cache *lc)
302{
303 if (!list_empty(&lc->free))
304 return 1; /* something on the free list */
305 if (!list_empty(&lc->lru))
306 return 1; /* something to evict */
307
308 return 0;
309}
310
311
312/**
313 * lc_get - get element by label, maybe change the active set
314 * @lc: the lru cache to operate on
315 * @enr: the label to look up
316 *
317 * Finds an element in the cache, increases its usage count,
318 * "touches" and returns it.
319 *
320 * In case the requested number is not present, it needs to be added to the
321 * cache. Therefore it is possible that an other element becomes evicted from
322 * the cache. In either case, the user is notified so he is able to e.g. keep
323 * a persistent log of the cache changes, and therefore the objects in use.
324 *
325 * Return values:
326 * NULL
327 * The cache was marked %LC_STARVING,
328 * or the requested label was not in the active set
329 * and a changing transaction is still pending (@lc was marked %LC_DIRTY).
330 * Or no unused or free element could be recycled (@lc will be marked as
331 * %LC_STARVING, blocking further lc_get() operations).
332 *
333 * pointer to the element with the REQUESTED element number.
334 * In this case, it can be used right away
335 *
336 * pointer to an UNUSED element with some different element number,
337 * where that different number may also be %LC_FREE.
338 *
339 * In this case, the cache is marked %LC_DIRTY (blocking further changes),
340 * and the returned element pointer is removed from the lru list and
341 * hash collision chains. The user now should do whatever housekeeping
342 * is necessary.
343 * Then he must call lc_changed(lc,element_pointer), to finish
344 * the change.
345 *
346 * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
347 * any cache set change.
348 */
349struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
350{
351 struct lc_element *e;
352
353 PARANOIA_ENTRY();
354 if (lc->flags & LC_STARVING) {
355 ++lc->starving;
356 RETURN(NULL);
357 }
358
359 e = lc_find(lc, enr);
360 if (e) {
361 ++lc->hits;
362 if (e->refcnt++ == 0)
363 lc->used++;
364 list_move(&e->list, &lc->in_use); /* Not evictable... */
365 RETURN(e);
366 }
367
368 ++lc->misses;
369
370 /* In case there is nothing available and we can not kick out
371 * the LRU element, we have to wait ...
372 */
373 if (!lc_unused_element_available(lc)) {
374 __set_bit(__LC_STARVING, &lc->flags);
375 RETURN(NULL);
376 }
377
378 /* it was not present in the active set.
379 * we are going to recycle an unused (or even "free") element.
380 * user may need to commit a transaction to record that change.
381 * we serialize on flags & TF_DIRTY */
382 if (test_and_set_bit(__LC_DIRTY, &lc->flags)) {
383 ++lc->dirty;
384 RETURN(NULL);
385 }
386
387 e = lc_get_unused_element(lc);
388 BUG_ON(!e);
389
390 clear_bit(__LC_STARVING, &lc->flags);
391 BUG_ON(++e->refcnt != 1);
392 lc->used++;
393
394 lc->changing_element = e;
395 lc->new_number = enr;
396
397 RETURN(e);
398}
399
400/* similar to lc_get,
401 * but only gets a new reference on an existing element.
402 * you either get the requested element, or NULL.
403 * will be consolidated into one function.
404 */
405struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
406{
407 struct lc_element *e;
408
409 PARANOIA_ENTRY();
410 if (lc->flags & LC_STARVING) {
411 ++lc->starving;
412 RETURN(NULL);
413 }
414
415 e = lc_find(lc, enr);
416 if (e) {
417 ++lc->hits;
418 if (e->refcnt++ == 0)
419 lc->used++;
420 list_move(&e->list, &lc->in_use); /* Not evictable... */
421 }
422 RETURN(e);
423}
424
425/**
426 * lc_changed - tell @lc that the change has been recorded
427 * @lc: the lru cache to operate on
428 * @e: the element pending label change
429 */
430void lc_changed(struct lru_cache *lc, struct lc_element *e)
431{
432 PARANOIA_ENTRY();
433 BUG_ON(e != lc->changing_element);
434 PARANOIA_LC_ELEMENT(lc, e);
435 ++lc->changed;
436 e->lc_number = lc->new_number;
437 list_add(&e->list, &lc->in_use);
438 hlist_add_head(&e->colision, lc_hash_slot(lc, lc->new_number));
439 lc->changing_element = NULL;
440 lc->new_number = LC_FREE;
441 clear_bit(__LC_DIRTY, &lc->flags);
442 smp_mb__after_clear_bit();
443 RETURN();
444}
445
446
447/**
448 * lc_put - give up refcnt of @e
449 * @lc: the lru cache to operate on
450 * @e: the element to put
451 *
452 * If refcnt reaches zero, the element is moved to the lru list,
453 * and a %LC_STARVING (if set) is cleared.
454 * Returns the new (post-decrement) refcnt.
455 */
456unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
457{
458 PARANOIA_ENTRY();
459 PARANOIA_LC_ELEMENT(lc, e);
460 BUG_ON(e->refcnt == 0);
461 BUG_ON(e == lc->changing_element);
462 if (--e->refcnt == 0) {
463 /* move it to the front of LRU. */
464 list_move(&e->list, &lc->lru);
465 lc->used--;
466 clear_bit(__LC_STARVING, &lc->flags);
467 smp_mb__after_clear_bit();
468 }
469 RETURN(e->refcnt);
470}
471
472/**
473 * lc_element_by_index
474 * @lc: the lru cache to operate on
475 * @i: the index of the element to return
476 */
477struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
478{
479 BUG_ON(i >= lc->nr_elements);
480 BUG_ON(lc->lc_element[i] == NULL);
481 BUG_ON(lc->lc_element[i]->lc_index != i);
482 return lc->lc_element[i];
483}
484
485/**
486 * lc_index_of
487 * @lc: the lru cache to operate on
488 * @e: the element to query for its index position in lc->element
489 */
490unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
491{
492 PARANOIA_LC_ELEMENT(lc, e);
493 return e->lc_index;
494}
495
496/**
497 * lc_set - associate index with label
498 * @lc: the lru cache to operate on
499 * @enr: the label to set
500 * @index: the element index to associate label with.
501 *
502 * Used to initialize the active set to some previously recorded state.
503 */
504void lc_set(struct lru_cache *lc, unsigned int enr, int index)
505{
506 struct lc_element *e;
507
508 if (index < 0 || index >= lc->nr_elements)
509 return;
510
511 e = lc_element_by_index(lc, index);
512 e->lc_number = enr;
513
514 hlist_del_init(&e->colision);
515 hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
516 list_move(&e->list, e->refcnt ? &lc->in_use : &lc->lru);
517}
518
519/**
520 * lc_dump - Dump a complete LRU cache to seq in textual form.
521 * @lc: the lru cache to operate on
522 * @seq: the &struct seq_file pointer to seq_printf into
523 * @utext: user supplied "heading" or other info
524 * @detail: function pointer the user may provide to dump further details
525 * of the object the lc_element is embedded in.
526 */
527void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
528 void (*detail) (struct seq_file *, struct lc_element *))
529{
530 unsigned int nr_elements = lc->nr_elements;
531 struct lc_element *e;
532 int i;
533
534 seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext);
535 for (i = 0; i < nr_elements; i++) {
536 e = lc_element_by_index(lc, i);
537 if (e->lc_number == LC_FREE) {
538 seq_printf(seq, "\t%2d: FREE\n", i);
539 } else {
540 seq_printf(seq, "\t%2d: %4u %4u ", i,
541 e->lc_number, e->refcnt);
542 detail(seq, e);
543 }
544 }
545}
546
/* exported interface of the generic lru_cache */
EXPORT_SYMBOL(lc_create);
EXPORT_SYMBOL(lc_reset);
EXPORT_SYMBOL(lc_destroy);
EXPORT_SYMBOL(lc_set);
EXPORT_SYMBOL(lc_del);
EXPORT_SYMBOL(lc_try_get);
EXPORT_SYMBOL(lc_find);
EXPORT_SYMBOL(lc_get);
EXPORT_SYMBOL(lc_put);
EXPORT_SYMBOL(lc_changed);
EXPORT_SYMBOL(lc_element_by_index);
EXPORT_SYMBOL(lc_index_of);
EXPORT_SYMBOL(lc_seq_printf_stats);
EXPORT_SYMBOL(lc_seq_dump_details);
diff --git a/lib/parser.c b/lib/parser.c
index b00d02059a5f..fb34977246bb 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -56,13 +56,16 @@ static int match_one(char *s, const char *p, substring_t args[])
56 56
57 args[argc].from = s; 57 args[argc].from = s;
58 switch (*p++) { 58 switch (*p++) {
59 case 's': 59 case 's': {
60 if (strlen(s) == 0) 60 size_t str_len = strlen(s);
61
62 if (str_len == 0)
61 return 0; 63 return 0;
62 else if (len == -1 || len > strlen(s)) 64 if (len == -1 || len > str_len)
63 len = strlen(s); 65 len = str_len;
64 args[argc].to = s + len; 66 args[argc].to = s + len;
65 break; 67 break;
68 }
66 case 'd': 69 case 'd':
67 simple_strtol(s, &args[argc].to, 0); 70 simple_strtol(s, &args[argc].to, 0);
68 goto num; 71 goto num;
diff --git a/lib/plist.c b/lib/plist.c
index d6c64a824e1d..1471988d9190 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -54,9 +54,11 @@ static void plist_check_list(struct list_head *top)
54 54
55static void plist_check_head(struct plist_head *head) 55static void plist_check_head(struct plist_head *head)
56{ 56{
57 WARN_ON(!head->lock); 57 WARN_ON(!head->rawlock && !head->spinlock);
58 if (head->lock) 58 if (head->rawlock)
59 WARN_ON_SMP(!spin_is_locked(head->lock)); 59 WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
60 if (head->spinlock)
61 WARN_ON_SMP(!spin_is_locked(head->spinlock));
60 plist_check_list(&head->prio_list); 62 plist_check_list(&head->prio_list);
61 plist_check_list(&head->node_list); 63 plist_check_list(&head->node_list);
62} 64}
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 23abbd93cae1..92cdd9936e3d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -200,6 +200,9 @@ radix_tree_node_free(struct radix_tree_node *node)
200 * ensure that the addition of a single element in the tree cannot fail. On 200 * ensure that the addition of a single element in the tree cannot fail. On
201 * success, return zero, with preemption disabled. On error, return -ENOMEM 201 * success, return zero, with preemption disabled. On error, return -ENOMEM
202 * with preemption not disabled. 202 * with preemption not disabled.
203 *
204 * To make use of this facility, the radix tree must be initialised without
205 * __GFP_WAIT being passed to INIT_RADIX_TREE().
203 */ 206 */
204int radix_tree_preload(gfp_t gfp_mask) 207int radix_tree_preload(gfp_t gfp_mask)
205{ 208{
@@ -543,7 +546,6 @@ out:
543} 546}
544EXPORT_SYMBOL(radix_tree_tag_clear); 547EXPORT_SYMBOL(radix_tree_tag_clear);
545 548
546#ifndef __KERNEL__ /* Only the test harness uses this at present */
547/** 549/**
548 * radix_tree_tag_get - get a tag on a radix tree node 550 * radix_tree_tag_get - get a tag on a radix tree node
549 * @root: radix tree root 551 * @root: radix tree root
@@ -606,7 +608,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
606 } 608 }
607} 609}
608EXPORT_SYMBOL(radix_tree_tag_get); 610EXPORT_SYMBOL(radix_tree_tag_get);
609#endif
610 611
611/** 612/**
612 * radix_tree_next_hole - find the next hole (not-present entry) 613 * radix_tree_next_hole - find the next hole (not-present entry)
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 26187edcc7ea..09f5ce1810dc 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -7,15 +7,12 @@
7 * parameter. Now every user can use their own standalone ratelimit_state. 7 * parameter. Now every user can use their own standalone ratelimit_state.
8 * 8 *
9 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
10 *
11 */ 10 */
12 11
13#include <linux/kernel.h> 12#include <linux/ratelimit.h>
14#include <linux/jiffies.h> 13#include <linux/jiffies.h>
15#include <linux/module.h> 14#include <linux/module.h>
16 15
17static DEFINE_SPINLOCK(ratelimit_lock);
18
19/* 16/*
20 * __ratelimit - rate limiting 17 * __ratelimit - rate limiting
21 * @rs: ratelimit_state data 18 * @rs: ratelimit_state data
@@ -23,35 +20,43 @@ static DEFINE_SPINLOCK(ratelimit_lock);
23 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks 20 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
24 * in every @rs->ratelimit_jiffies 21 * in every @rs->ratelimit_jiffies
25 */ 22 */
26int __ratelimit(struct ratelimit_state *rs) 23int ___ratelimit(struct ratelimit_state *rs, const char *func)
27{ 24{
28 unsigned long flags; 25 unsigned long flags;
26 int ret;
29 27
30 if (!rs->interval) 28 if (!rs->interval)
31 return 1; 29 return 1;
32 30
33 spin_lock_irqsave(&ratelimit_lock, flags); 31 /*
32 * If we contend on this state's lock then almost
33 * by definition we are too busy to print a message,
34 * in addition to the one that will be printed by
35 * the entity that is holding the lock already:
36 */
37 if (!spin_trylock_irqsave(&rs->lock, flags))
38 return 1;
39
34 if (!rs->begin) 40 if (!rs->begin)
35 rs->begin = jiffies; 41 rs->begin = jiffies;
36 42
37 if (time_is_before_jiffies(rs->begin + rs->interval)) { 43 if (time_is_before_jiffies(rs->begin + rs->interval)) {
38 if (rs->missed) 44 if (rs->missed)
39 printk(KERN_WARNING "%s: %d callbacks suppressed\n", 45 printk(KERN_WARNING "%s: %d callbacks suppressed\n",
40 __func__, rs->missed); 46 func, rs->missed);
41 rs->begin = 0; 47 rs->begin = 0;
42 rs->printed = 0; 48 rs->printed = 0;
43 rs->missed = 0; 49 rs->missed = 0;
44 } 50 }
45 if (rs->burst && rs->burst > rs->printed) 51 if (rs->burst && rs->burst > rs->printed) {
46 goto print; 52 rs->printed++;
47 53 ret = 1;
48 rs->missed++; 54 } else {
49 spin_unlock_irqrestore(&ratelimit_lock, flags); 55 rs->missed++;
50 return 0; 56 ret = 0;
57 }
58 spin_unlock_irqrestore(&rs->lock, flags);
51 59
52print: 60 return ret;
53 rs->printed++;
54 spin_unlock_irqrestore(&ratelimit_lock, flags);
55 return 1;
56} 61}
57EXPORT_SYMBOL(__ratelimit); 62EXPORT_SYMBOL(___ratelimit);
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 9df3ca56db11..ccf95bff7984 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -17,6 +17,19 @@ struct rwsem_waiter {
17#define RWSEM_WAITING_FOR_WRITE 0x00000002 17#define RWSEM_WAITING_FOR_WRITE 0x00000002
18}; 18};
19 19
20int rwsem_is_locked(struct rw_semaphore *sem)
21{
22 int ret = 1;
23 unsigned long flags;
24
25 if (spin_trylock_irqsave(&sem->wait_lock, flags)) {
26 ret = (sem->activity != 0);
27 spin_unlock_irqrestore(&sem->wait_lock, flags);
28 }
29 return ret;
30}
31EXPORT_SYMBOL(rwsem_is_locked);
32
20/* 33/*
21 * initialise the semaphore 34 * initialise the semaphore
22 */ 35 */
@@ -34,6 +47,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
34 spin_lock_init(&sem->wait_lock); 47 spin_lock_init(&sem->wait_lock);
35 INIT_LIST_HEAD(&sem->wait_list); 48 INIT_LIST_HEAD(&sem->wait_list);
36} 49}
50EXPORT_SYMBOL(__init_rwsem);
37 51
38/* 52/*
39 * handle the lock release when processes blocked on it that can now run 53 * handle the lock release when processes blocked on it that can now run
@@ -305,12 +319,3 @@ void __downgrade_write(struct rw_semaphore *sem)
305 spin_unlock_irqrestore(&sem->wait_lock, flags); 319 spin_unlock_irqrestore(&sem->wait_lock, flags);
306} 320}
307 321
308EXPORT_SYMBOL(__init_rwsem);
309EXPORT_SYMBOL(__down_read);
310EXPORT_SYMBOL(__down_read_trylock);
311EXPORT_SYMBOL(__down_write_nested);
312EXPORT_SYMBOL(__down_write);
313EXPORT_SYMBOL(__down_write_trylock);
314EXPORT_SYMBOL(__up_read);
315EXPORT_SYMBOL(__up_write);
316EXPORT_SYMBOL(__downgrade_write);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 9c4b0256490b..4755b98b6dfb 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,8 +13,8 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/module.h> 14#include <linux/module.h>
15 15
16void __spin_lock_init(spinlock_t *lock, const char *name, 16void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
17 struct lock_class_key *key) 17 struct lock_class_key *key)
18{ 18{
19#ifdef CONFIG_DEBUG_LOCK_ALLOC 19#ifdef CONFIG_DEBUG_LOCK_ALLOC
20 /* 20 /*
@@ -23,13 +23,13 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
23 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 23 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
24 lockdep_init_map(&lock->dep_map, name, key, 0); 24 lockdep_init_map(&lock->dep_map, name, key, 0);
25#endif 25#endif
26 lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 26 lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
27 lock->magic = SPINLOCK_MAGIC; 27 lock->magic = SPINLOCK_MAGIC;
28 lock->owner = SPINLOCK_OWNER_INIT; 28 lock->owner = SPINLOCK_OWNER_INIT;
29 lock->owner_cpu = -1; 29 lock->owner_cpu = -1;
30} 30}
31 31
32EXPORT_SYMBOL(__spin_lock_init); 32EXPORT_SYMBOL(__raw_spin_lock_init);
33 33
34void __rwlock_init(rwlock_t *lock, const char *name, 34void __rwlock_init(rwlock_t *lock, const char *name,
35 struct lock_class_key *key) 35 struct lock_class_key *key)
@@ -41,7 +41,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
41 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 41 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
42 lockdep_init_map(&lock->dep_map, name, key, 0); 42 lockdep_init_map(&lock->dep_map, name, key, 0);
43#endif 43#endif
44 lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; 44 lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
45 lock->magic = RWLOCK_MAGIC; 45 lock->magic = RWLOCK_MAGIC;
46 lock->owner = SPINLOCK_OWNER_INIT; 46 lock->owner = SPINLOCK_OWNER_INIT;
47 lock->owner_cpu = -1; 47 lock->owner_cpu = -1;
@@ -49,7 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
49 49
50EXPORT_SYMBOL(__rwlock_init); 50EXPORT_SYMBOL(__rwlock_init);
51 51
52static void spin_bug(spinlock_t *lock, const char *msg) 52static void spin_bug(raw_spinlock_t *lock, const char *msg)
53{ 53{
54 struct task_struct *owner = NULL; 54 struct task_struct *owner = NULL;
55 55
@@ -73,7 +73,7 @@ static void spin_bug(spinlock_t *lock, const char *msg)
73#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) 73#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
74 74
75static inline void 75static inline void
76debug_spin_lock_before(spinlock_t *lock) 76debug_spin_lock_before(raw_spinlock_t *lock)
77{ 77{
78 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); 78 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
79 SPIN_BUG_ON(lock->owner == current, lock, "recursion"); 79 SPIN_BUG_ON(lock->owner == current, lock, "recursion");
@@ -81,16 +81,16 @@ debug_spin_lock_before(spinlock_t *lock)
81 lock, "cpu recursion"); 81 lock, "cpu recursion");
82} 82}
83 83
84static inline void debug_spin_lock_after(spinlock_t *lock) 84static inline void debug_spin_lock_after(raw_spinlock_t *lock)
85{ 85{
86 lock->owner_cpu = raw_smp_processor_id(); 86 lock->owner_cpu = raw_smp_processor_id();
87 lock->owner = current; 87 lock->owner = current;
88} 88}
89 89
90static inline void debug_spin_unlock(spinlock_t *lock) 90static inline void debug_spin_unlock(raw_spinlock_t *lock)
91{ 91{
92 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); 92 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
93 SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); 93 SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
94 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); 94 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
95 SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), 95 SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
96 lock, "wrong CPU"); 96 lock, "wrong CPU");
@@ -98,7 +98,7 @@ static inline void debug_spin_unlock(spinlock_t *lock)
98 lock->owner_cpu = -1; 98 lock->owner_cpu = -1;
99} 99}
100 100
101static void __spin_lock_debug(spinlock_t *lock) 101static void __spin_lock_debug(raw_spinlock_t *lock)
102{ 102{
103 u64 i; 103 u64 i;
104 u64 loops = loops_per_jiffy * HZ; 104 u64 loops = loops_per_jiffy * HZ;
@@ -106,7 +106,7 @@ static void __spin_lock_debug(spinlock_t *lock)
106 106
107 for (;;) { 107 for (;;) {
108 for (i = 0; i < loops; i++) { 108 for (i = 0; i < loops; i++) {
109 if (__raw_spin_trylock(&lock->raw_lock)) 109 if (arch_spin_trylock(&lock->raw_lock))
110 return; 110 return;
111 __delay(1); 111 __delay(1);
112 } 112 }
@@ -125,17 +125,17 @@ static void __spin_lock_debug(spinlock_t *lock)
125 } 125 }
126} 126}
127 127
128void _raw_spin_lock(spinlock_t *lock) 128void do_raw_spin_lock(raw_spinlock_t *lock)
129{ 129{
130 debug_spin_lock_before(lock); 130 debug_spin_lock_before(lock);
131 if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) 131 if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
132 __spin_lock_debug(lock); 132 __spin_lock_debug(lock);
133 debug_spin_lock_after(lock); 133 debug_spin_lock_after(lock);
134} 134}
135 135
136int _raw_spin_trylock(spinlock_t *lock) 136int do_raw_spin_trylock(raw_spinlock_t *lock)
137{ 137{
138 int ret = __raw_spin_trylock(&lock->raw_lock); 138 int ret = arch_spin_trylock(&lock->raw_lock);
139 139
140 if (ret) 140 if (ret)
141 debug_spin_lock_after(lock); 141 debug_spin_lock_after(lock);
@@ -148,10 +148,10 @@ int _raw_spin_trylock(spinlock_t *lock)
148 return ret; 148 return ret;
149} 149}
150 150
151void _raw_spin_unlock(spinlock_t *lock) 151void do_raw_spin_unlock(raw_spinlock_t *lock)
152{ 152{
153 debug_spin_unlock(lock); 153 debug_spin_unlock(lock);
154 __raw_spin_unlock(&lock->raw_lock); 154 arch_spin_unlock(&lock->raw_lock);
155} 155}
156 156
157static void rwlock_bug(rwlock_t *lock, const char *msg) 157static void rwlock_bug(rwlock_t *lock, const char *msg)
@@ -176,7 +176,7 @@ static void __read_lock_debug(rwlock_t *lock)
176 176
177 for (;;) { 177 for (;;) {
178 for (i = 0; i < loops; i++) { 178 for (i = 0; i < loops; i++) {
179 if (__raw_read_trylock(&lock->raw_lock)) 179 if (arch_read_trylock(&lock->raw_lock))
180 return; 180 return;
181 __delay(1); 181 __delay(1);
182 } 182 }
@@ -193,15 +193,15 @@ static void __read_lock_debug(rwlock_t *lock)
193} 193}
194#endif 194#endif
195 195
196void _raw_read_lock(rwlock_t *lock) 196void do_raw_read_lock(rwlock_t *lock)
197{ 197{
198 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 198 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
199 __raw_read_lock(&lock->raw_lock); 199 arch_read_lock(&lock->raw_lock);
200} 200}
201 201
202int _raw_read_trylock(rwlock_t *lock) 202int do_raw_read_trylock(rwlock_t *lock)
203{ 203{
204 int ret = __raw_read_trylock(&lock->raw_lock); 204 int ret = arch_read_trylock(&lock->raw_lock);
205 205
206#ifndef CONFIG_SMP 206#ifndef CONFIG_SMP
207 /* 207 /*
@@ -212,10 +212,10 @@ int _raw_read_trylock(rwlock_t *lock)
212 return ret; 212 return ret;
213} 213}
214 214
215void _raw_read_unlock(rwlock_t *lock) 215void do_raw_read_unlock(rwlock_t *lock)
216{ 216{
217 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 217 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
218 __raw_read_unlock(&lock->raw_lock); 218 arch_read_unlock(&lock->raw_lock);
219} 219}
220 220
221static inline void debug_write_lock_before(rwlock_t *lock) 221static inline void debug_write_lock_before(rwlock_t *lock)
@@ -251,7 +251,7 @@ static void __write_lock_debug(rwlock_t *lock)
251 251
252 for (;;) { 252 for (;;) {
253 for (i = 0; i < loops; i++) { 253 for (i = 0; i < loops; i++) {
254 if (__raw_write_trylock(&lock->raw_lock)) 254 if (arch_write_trylock(&lock->raw_lock))
255 return; 255 return;
256 __delay(1); 256 __delay(1);
257 } 257 }
@@ -268,16 +268,16 @@ static void __write_lock_debug(rwlock_t *lock)
268} 268}
269#endif 269#endif
270 270
271void _raw_write_lock(rwlock_t *lock) 271void do_raw_write_lock(rwlock_t *lock)
272{ 272{
273 debug_write_lock_before(lock); 273 debug_write_lock_before(lock);
274 __raw_write_lock(&lock->raw_lock); 274 arch_write_lock(&lock->raw_lock);
275 debug_write_lock_after(lock); 275 debug_write_lock_after(lock);
276} 276}
277 277
278int _raw_write_trylock(rwlock_t *lock) 278int do_raw_write_trylock(rwlock_t *lock)
279{ 279{
280 int ret = __raw_write_trylock(&lock->raw_lock); 280 int ret = arch_write_trylock(&lock->raw_lock);
281 281
282 if (ret) 282 if (ret)
283 debug_write_lock_after(lock); 283 debug_write_lock_after(lock);
@@ -290,8 +290,8 @@ int _raw_write_trylock(rwlock_t *lock)
290 return ret; 290 return ret;
291} 291}
292 292
293void _raw_write_unlock(rwlock_t *lock) 293void do_raw_write_unlock(rwlock_t *lock)
294{ 294{
295 debug_write_unlock(lock); 295 debug_write_unlock(lock);
296 __raw_write_unlock(&lock->raw_lock); 296 arch_write_unlock(&lock->raw_lock);
297} 297}
diff --git a/lib/string.c b/lib/string.c
index b19b87af65a3..afce96af3afd 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -246,13 +246,17 @@ EXPORT_SYMBOL(strlcat);
246#undef strcmp 246#undef strcmp
247int strcmp(const char *cs, const char *ct) 247int strcmp(const char *cs, const char *ct)
248{ 248{
249 signed char __res; 249 unsigned char c1, c2;
250 250
251 while (1) { 251 while (1) {
252 if ((__res = *cs - *ct++) != 0 || !*cs++) 252 c1 = *cs++;
253 c2 = *ct++;
254 if (c1 != c2)
255 return c1 < c2 ? -1 : 1;
256 if (!c1)
253 break; 257 break;
254 } 258 }
255 return __res; 259 return 0;
256} 260}
257EXPORT_SYMBOL(strcmp); 261EXPORT_SYMBOL(strcmp);
258#endif 262#endif
@@ -266,14 +270,18 @@ EXPORT_SYMBOL(strcmp);
266 */ 270 */
267int strncmp(const char *cs, const char *ct, size_t count) 271int strncmp(const char *cs, const char *ct, size_t count)
268{ 272{
269 signed char __res = 0; 273 unsigned char c1, c2;
270 274
271 while (count) { 275 while (count) {
272 if ((__res = *cs - *ct++) != 0 || !*cs++) 276 c1 = *cs++;
277 c2 = *ct++;
278 if (c1 != c2)
279 return c1 < c2 ? -1 : 1;
280 if (!c1)
273 break; 281 break;
274 count--; 282 count--;
275 } 283 }
276 return __res; 284 return 0;
277} 285}
278EXPORT_SYMBOL(strncmp); 286EXPORT_SYMBOL(strncmp);
279#endif 287#endif
@@ -330,20 +338,34 @@ EXPORT_SYMBOL(strnchr);
330#endif 338#endif
331 339
332/** 340/**
333 * strstrip - Removes leading and trailing whitespace from @s. 341 * skip_spaces - Removes leading whitespace from @s.
342 * @s: The string to be stripped.
343 *
344 * Returns a pointer to the first non-whitespace character in @s.
345 */
346char *skip_spaces(const char *str)
347{
348 while (isspace(*str))
349 ++str;
350 return (char *)str;
351}
352EXPORT_SYMBOL(skip_spaces);
353
354/**
355 * strim - Removes leading and trailing whitespace from @s.
334 * @s: The string to be stripped. 356 * @s: The string to be stripped.
335 * 357 *
336 * Note that the first trailing whitespace is replaced with a %NUL-terminator 358 * Note that the first trailing whitespace is replaced with a %NUL-terminator
337 * in the given string @s. Returns a pointer to the first non-whitespace 359 * in the given string @s. Returns a pointer to the first non-whitespace
338 * character in @s. 360 * character in @s.
339 */ 361 */
340char *strstrip(char *s) 362char *strim(char *s)
341{ 363{
342 size_t size; 364 size_t size;
343 char *end; 365 char *end;
344 366
367 s = skip_spaces(s);
345 size = strlen(s); 368 size = strlen(s);
346
347 if (!size) 369 if (!size)
348 return s; 370 return s;
349 371
@@ -352,12 +374,9 @@ char *strstrip(char *s)
352 end--; 374 end--;
353 *(end + 1) = '\0'; 375 *(end + 1) = '\0';
354 376
355 while (*s && isspace(*s))
356 s++;
357
358 return s; 377 return s;
359} 378}
360EXPORT_SYMBOL(strstrip); 379EXPORT_SYMBOL(strim);
361 380
362#ifndef __HAVE_ARCH_STRLEN 381#ifndef __HAVE_ARCH_STRLEN
363/** 382/**
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ac25cd28e807..437eedb5a53b 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr;
97 */ 97 */
98static DEFINE_SPINLOCK(io_tlb_lock); 98static DEFINE_SPINLOCK(io_tlb_lock);
99 99
100static int late_alloc;
101
100static int __init 102static int __init
101setup_io_tlb_npages(char *str) 103setup_io_tlb_npages(char *str)
102{ 104{
@@ -109,6 +111,7 @@ setup_io_tlb_npages(char *str)
109 ++str; 111 ++str;
110 if (!strcmp(str, "force")) 112 if (!strcmp(str, "force"))
111 swiotlb_force = 1; 113 swiotlb_force = 1;
114
112 return 1; 115 return 1;
113} 116}
114__setup("swiotlb=", setup_io_tlb_npages); 117__setup("swiotlb=", setup_io_tlb_npages);
@@ -121,8 +124,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
121 return phys_to_dma(hwdev, virt_to_phys(address)); 124 return phys_to_dma(hwdev, virt_to_phys(address));
122} 125}
123 126
124static void swiotlb_print_info(unsigned long bytes) 127void swiotlb_print_info(void)
125{ 128{
129 unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
126 phys_addr_t pstart, pend; 130 phys_addr_t pstart, pend;
127 131
128 pstart = virt_to_phys(io_tlb_start); 132 pstart = virt_to_phys(io_tlb_start);
@@ -140,7 +144,7 @@ static void swiotlb_print_info(unsigned long bytes)
140 * structures for the software IO TLB used to implement the DMA API. 144 * structures for the software IO TLB used to implement the DMA API.
141 */ 145 */
142void __init 146void __init
143swiotlb_init_with_default_size(size_t default_size) 147swiotlb_init_with_default_size(size_t default_size, int verbose)
144{ 148{
145 unsigned long i, bytes; 149 unsigned long i, bytes;
146 150
@@ -176,14 +180,14 @@ swiotlb_init_with_default_size(size_t default_size)
176 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 180 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
177 if (!io_tlb_overflow_buffer) 181 if (!io_tlb_overflow_buffer)
178 panic("Cannot allocate SWIOTLB overflow buffer!\n"); 182 panic("Cannot allocate SWIOTLB overflow buffer!\n");
179 183 if (verbose)
180 swiotlb_print_info(bytes); 184 swiotlb_print_info();
181} 185}
182 186
183void __init 187void __init
184swiotlb_init(void) 188swiotlb_init(int verbose)
185{ 189{
186 swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ 190 swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */
187} 191}
188 192
189/* 193/*
@@ -260,7 +264,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
260 if (!io_tlb_overflow_buffer) 264 if (!io_tlb_overflow_buffer)
261 goto cleanup4; 265 goto cleanup4;
262 266
263 swiotlb_print_info(bytes); 267 swiotlb_print_info();
268
269 late_alloc = 1;
264 270
265 return 0; 271 return 0;
266 272
@@ -281,6 +287,32 @@ cleanup1:
281 return -ENOMEM; 287 return -ENOMEM;
282} 288}
283 289
290void __init swiotlb_free(void)
291{
292 if (!io_tlb_overflow_buffer)
293 return;
294
295 if (late_alloc) {
296 free_pages((unsigned long)io_tlb_overflow_buffer,
297 get_order(io_tlb_overflow));
298 free_pages((unsigned long)io_tlb_orig_addr,
299 get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
300 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
301 sizeof(int)));
302 free_pages((unsigned long)io_tlb_start,
303 get_order(io_tlb_nslabs << IO_TLB_SHIFT));
304 } else {
305 free_bootmem_late(__pa(io_tlb_overflow_buffer),
306 io_tlb_overflow);
307 free_bootmem_late(__pa(io_tlb_orig_addr),
308 io_tlb_nslabs * sizeof(phys_addr_t));
309 free_bootmem_late(__pa(io_tlb_list),
310 io_tlb_nslabs * sizeof(int));
311 free_bootmem_late(__pa(io_tlb_start),
312 io_tlb_nslabs << IO_TLB_SHIFT);
313 }
314}
315
284static int is_swiotlb_buffer(phys_addr_t paddr) 316static int is_swiotlb_buffer(phys_addr_t paddr)
285{ 317{
286 return paddr >= virt_to_phys(io_tlb_start) && 318 return paddr >= virt_to_phys(io_tlb_start) &&
@@ -453,7 +485,7 @@ do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
453 485
454 /* 486 /*
455 * Return the buffer to the free list by setting the corresponding 487 * Return the buffer to the free list by setting the corresponding
456 * entries to indicate the number of contigous entries available. 488 * entries to indicate the number of contiguous entries available.
457 * While returning the entries to the free list, we merge the entries 489 * While returning the entries to the free list, we merge the entries
458 * with slots below and above the pool being returned. 490 * with slots below and above the pool being returned.
459 */ 491 */
@@ -517,7 +549,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
517 dma_mask = hwdev->coherent_dma_mask; 549 dma_mask = hwdev->coherent_dma_mask;
518 550
519 ret = (void *)__get_free_pages(flags, order); 551 ret = (void *)__get_free_pages(flags, order);
520 if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) { 552 if (ret && swiotlb_virt_to_bus(hwdev, ret) + size - 1 > dma_mask) {
521 /* 553 /*
522 * The allocated memory isn't reachable by the device. 554 * The allocated memory isn't reachable by the device.
523 */ 555 */
@@ -539,7 +571,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
539 dev_addr = swiotlb_virt_to_bus(hwdev, ret); 571 dev_addr = swiotlb_virt_to_bus(hwdev, ret);
540 572
541 /* Confirm address can be DMA'd by device */ 573 /* Confirm address can be DMA'd by device */
542 if (dev_addr + size > dma_mask) { 574 if (dev_addr + size - 1 > dma_mask) {
543 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 575 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
544 (unsigned long long)dma_mask, 576 (unsigned long long)dma_mask,
545 (unsigned long long)dev_addr); 577 (unsigned long long)dev_addr);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 73a14b8c6d1f..735343fc857a 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -9,7 +9,7 @@
9 * Wirzenius wrote this portably, Torvalds fucked it up :-) 9 * Wirzenius wrote this portably, Torvalds fucked it up :-)
10 */ 10 */
11 11
12/* 12/*
13 * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com> 13 * Fri Jul 13 2001 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
14 * - changed to provide snprintf and vsnprintf functions 14 * - changed to provide snprintf and vsnprintf functions
15 * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de> 15 * So Feb 1 16:51:32 CET 2004 Juergen Quade <quade@hsnr.de>
@@ -47,14 +47,14 @@ static unsigned int simple_guess_base(const char *cp)
47} 47}
48 48
49/** 49/**
50 * simple_strtoul - convert a string to an unsigned long 50 * simple_strtoull - convert a string to an unsigned long long
51 * @cp: The start of the string 51 * @cp: The start of the string
52 * @endp: A pointer to the end of the parsed string will be placed here 52 * @endp: A pointer to the end of the parsed string will be placed here
53 * @base: The number base to use 53 * @base: The number base to use
54 */ 54 */
55unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base) 55unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
56{ 56{
57 unsigned long result = 0; 57 unsigned long long result = 0;
58 58
59 if (!base) 59 if (!base)
60 base = simple_guess_base(cp); 60 base = simple_guess_base(cp);
@@ -71,58 +71,39 @@ unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
71 result = result * base + value; 71 result = result * base + value;
72 cp++; 72 cp++;
73 } 73 }
74
75 if (endp) 74 if (endp)
76 *endp = (char *)cp; 75 *endp = (char *)cp;
76
77 return result; 77 return result;
78} 78}
79EXPORT_SYMBOL(simple_strtoul); 79EXPORT_SYMBOL(simple_strtoull);
80 80
81/** 81/**
82 * simple_strtol - convert a string to a signed long 82 * simple_strtoul - convert a string to an unsigned long
83 * @cp: The start of the string 83 * @cp: The start of the string
84 * @endp: A pointer to the end of the parsed string will be placed here 84 * @endp: A pointer to the end of the parsed string will be placed here
85 * @base: The number base to use 85 * @base: The number base to use
86 */ 86 */
87long simple_strtol(const char *cp, char **endp, unsigned int base) 87unsigned long simple_strtoul(const char *cp, char **endp, unsigned int base)
88{ 88{
89 if(*cp == '-') 89 return simple_strtoull(cp, endp, base);
90 return -simple_strtoul(cp + 1, endp, base);
91 return simple_strtoul(cp, endp, base);
92} 90}
93EXPORT_SYMBOL(simple_strtol); 91EXPORT_SYMBOL(simple_strtoul);
94 92
95/** 93/**
96 * simple_strtoull - convert a string to an unsigned long long 94 * simple_strtol - convert a string to a signed long
97 * @cp: The start of the string 95 * @cp: The start of the string
98 * @endp: A pointer to the end of the parsed string will be placed here 96 * @endp: A pointer to the end of the parsed string will be placed here
99 * @base: The number base to use 97 * @base: The number base to use
100 */ 98 */
101unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base) 99long simple_strtol(const char *cp, char **endp, unsigned int base)
102{ 100{
103 unsigned long long result = 0; 101 if (*cp == '-')
104 102 return -simple_strtoul(cp + 1, endp, base);
105 if (!base)
106 base = simple_guess_base(cp);
107
108 if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
109 cp += 2;
110
111 while (isxdigit(*cp)) {
112 unsigned int value;
113
114 value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
115 if (value >= base)
116 break;
117 result = result * base + value;
118 cp++;
119 }
120 103
121 if (endp) 104 return simple_strtoul(cp, endp, base);
122 *endp = (char *)cp;
123 return result;
124} 105}
125EXPORT_SYMBOL(simple_strtoull); 106EXPORT_SYMBOL(simple_strtol);
126 107
127/** 108/**
128 * simple_strtoll - convert a string to a signed long long 109 * simple_strtoll - convert a string to a signed long long
@@ -132,8 +113,9 @@ EXPORT_SYMBOL(simple_strtoull);
132 */ 113 */
133long long simple_strtoll(const char *cp, char **endp, unsigned int base) 114long long simple_strtoll(const char *cp, char **endp, unsigned int base)
134{ 115{
135 if(*cp=='-') 116 if (*cp == '-')
136 return -simple_strtoull(cp + 1, endp, base); 117 return -simple_strtoull(cp + 1, endp, base);
118
137 return simple_strtoull(cp, endp, base); 119 return simple_strtoull(cp, endp, base);
138} 120}
139 121
@@ -173,6 +155,7 @@ int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
173 val = simple_strtoul(cp, &tail, base); 155 val = simple_strtoul(cp, &tail, base);
174 if (tail == cp) 156 if (tail == cp)
175 return -EINVAL; 157 return -EINVAL;
158
176 if ((*tail == '\0') || 159 if ((*tail == '\0') ||
177 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) { 160 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
178 *res = val; 161 *res = val;
@@ -285,10 +268,11 @@ EXPORT_SYMBOL(strict_strtoll);
285 268
286static int skip_atoi(const char **s) 269static int skip_atoi(const char **s)
287{ 270{
288 int i=0; 271 int i = 0;
289 272
290 while (isdigit(**s)) 273 while (isdigit(**s))
291 i = i*10 + *((*s)++) - '0'; 274 i = i*10 + *((*s)++) - '0';
275
292 return i; 276 return i;
293} 277}
294 278
@@ -302,7 +286,7 @@ static int skip_atoi(const char **s)
302/* Formats correctly any integer in [0,99999]. 286/* Formats correctly any integer in [0,99999].
303 * Outputs from one to five digits depending on input. 287 * Outputs from one to five digits depending on input.
304 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */ 288 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
305static char* put_dec_trunc(char *buf, unsigned q) 289static char *put_dec_trunc(char *buf, unsigned q)
306{ 290{
307 unsigned d3, d2, d1, d0; 291 unsigned d3, d2, d1, d0;
308 d1 = (q>>4) & 0xf; 292 d1 = (q>>4) & 0xf;
@@ -331,14 +315,15 @@ static char* put_dec_trunc(char *buf, unsigned q)
331 d3 = d3 - 10*q; 315 d3 = d3 - 10*q;
332 *buf++ = d3 + '0'; /* next digit */ 316 *buf++ = d3 + '0'; /* next digit */
333 if (q != 0) 317 if (q != 0)
334 *buf++ = q + '0'; /* most sign. digit */ 318 *buf++ = q + '0'; /* most sign. digit */
335 } 319 }
336 } 320 }
337 } 321 }
322
338 return buf; 323 return buf;
339} 324}
340/* Same with if's removed. Always emits five digits */ 325/* Same with if's removed. Always emits five digits */
341static char* put_dec_full(char *buf, unsigned q) 326static char *put_dec_full(char *buf, unsigned q)
342{ 327{
343 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ 328 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */
344 /* but anyway, gcc produces better code with full-sized ints */ 329 /* but anyway, gcc produces better code with full-sized ints */
@@ -347,14 +332,15 @@ static char* put_dec_full(char *buf, unsigned q)
347 d2 = (q>>8) & 0xf; 332 d2 = (q>>8) & 0xf;
348 d3 = (q>>12); 333 d3 = (q>>12);
349 334
350 /* Possible ways to approx. divide by 10 */ 335 /*
351 /* gcc -O2 replaces multiply with shifts and adds */ 336 * Possible ways to approx. divide by 10
352 // (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) 337 * gcc -O2 replaces multiply with shifts and adds
353 // (x * 0x67) >> 10: 1100111 338 * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386)
354 // (x * 0x34) >> 9: 110100 - same 339 * (x * 0x67) >> 10: 1100111
355 // (x * 0x1a) >> 8: 11010 - same 340 * (x * 0x34) >> 9: 110100 - same
356 // (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) 341 * (x * 0x1a) >> 8: 11010 - same
357 342 * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386)
343 */
358 d0 = 6*(d3 + d2 + d1) + (q & 0xf); 344 d0 = 6*(d3 + d2 + d1) + (q & 0xf);
359 q = (d0 * 0xcd) >> 11; 345 q = (d0 * 0xcd) >> 11;
360 d0 = d0 - 10*q; 346 d0 = d0 - 10*q;
@@ -375,10 +361,11 @@ static char* put_dec_full(char *buf, unsigned q)
375 d3 = d3 - 10*q; 361 d3 = d3 - 10*q;
376 *buf++ = d3 + '0'; 362 *buf++ = d3 + '0';
377 *buf++ = q + '0'; 363 *buf++ = q + '0';
364
378 return buf; 365 return buf;
379} 366}
380/* No inlining helps gcc to use registers better */ 367/* No inlining helps gcc to use registers better */
381static noinline char* put_dec(char *buf, unsigned long long num) 368static noinline char *put_dec(char *buf, unsigned long long num)
382{ 369{
383 while (1) { 370 while (1) {
384 unsigned rem; 371 unsigned rem;
@@ -448,9 +435,9 @@ static char *number(char *buf, char *end, unsigned long long num,
448 spec.flags &= ~ZEROPAD; 435 spec.flags &= ~ZEROPAD;
449 sign = 0; 436 sign = 0;
450 if (spec.flags & SIGN) { 437 if (spec.flags & SIGN) {
451 if ((signed long long) num < 0) { 438 if ((signed long long)num < 0) {
452 sign = '-'; 439 sign = '-';
453 num = - (signed long long) num; 440 num = -(signed long long)num;
454 spec.field_width--; 441 spec.field_width--;
455 } else if (spec.flags & PLUS) { 442 } else if (spec.flags & PLUS) {
456 sign = '+'; 443 sign = '+';
@@ -478,7 +465,9 @@ static char *number(char *buf, char *end, unsigned long long num,
478 else if (spec.base != 10) { /* 8 or 16 */ 465 else if (spec.base != 10) { /* 8 or 16 */
479 int mask = spec.base - 1; 466 int mask = spec.base - 1;
480 int shift = 3; 467 int shift = 3;
481 if (spec.base == 16) shift = 4; 468
469 if (spec.base == 16)
470 shift = 4;
482 do { 471 do {
483 tmp[i++] = (digits[((unsigned char)num) & mask] | locase); 472 tmp[i++] = (digits[((unsigned char)num) & mask] | locase);
484 num >>= shift; 473 num >>= shift;
@@ -493,7 +482,7 @@ static char *number(char *buf, char *end, unsigned long long num,
493 /* leading space padding */ 482 /* leading space padding */
494 spec.field_width -= spec.precision; 483 spec.field_width -= spec.precision;
495 if (!(spec.flags & (ZEROPAD+LEFT))) { 484 if (!(spec.flags & (ZEROPAD+LEFT))) {
496 while(--spec.field_width >= 0) { 485 while (--spec.field_width >= 0) {
497 if (buf < end) 486 if (buf < end)
498 *buf = ' '; 487 *buf = ' ';
499 ++buf; 488 ++buf;
@@ -543,15 +532,16 @@ static char *number(char *buf, char *end, unsigned long long num,
543 *buf = ' '; 532 *buf = ' ';
544 ++buf; 533 ++buf;
545 } 534 }
535
546 return buf; 536 return buf;
547} 537}
548 538
549static char *string(char *buf, char *end, char *s, struct printf_spec spec) 539static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
550{ 540{
551 int len, i; 541 int len, i;
552 542
553 if ((unsigned long)s < PAGE_SIZE) 543 if ((unsigned long)s < PAGE_SIZE)
554 s = "<NULL>"; 544 s = "(null)";
555 545
556 len = strnlen(s, spec.precision); 546 len = strnlen(s, spec.precision);
557 547
@@ -572,6 +562,7 @@ static char *string(char *buf, char *end, char *s, struct printf_spec spec)
572 *buf = ' '; 562 *buf = ' ';
573 ++buf; 563 ++buf;
574 } 564 }
565
575 return buf; 566 return buf;
576} 567}
577 568
@@ -585,47 +576,101 @@ static char *symbol_string(char *buf, char *end, void *ptr,
585 sprint_symbol(sym, value); 576 sprint_symbol(sym, value);
586 else 577 else
587 kallsyms_lookup(value, NULL, NULL, NULL, sym); 578 kallsyms_lookup(value, NULL, NULL, NULL, sym);
579
588 return string(buf, end, sym, spec); 580 return string(buf, end, sym, spec);
589#else 581#else
590 spec.field_width = 2*sizeof(void *); 582 spec.field_width = 2 * sizeof(void *);
591 spec.flags |= SPECIAL | SMALL | ZEROPAD; 583 spec.flags |= SPECIAL | SMALL | ZEROPAD;
592 spec.base = 16; 584 spec.base = 16;
585
593 return number(buf, end, value, spec); 586 return number(buf, end, value, spec);
594#endif 587#endif
595} 588}
596 589
597static char *resource_string(char *buf, char *end, struct resource *res, 590static char *resource_string(char *buf, char *end, struct resource *res,
598 struct printf_spec spec) 591 struct printf_spec spec, const char *fmt)
599{ 592{
600#ifndef IO_RSRC_PRINTK_SIZE 593#ifndef IO_RSRC_PRINTK_SIZE
601#define IO_RSRC_PRINTK_SIZE 4 594#define IO_RSRC_PRINTK_SIZE 6
602#endif 595#endif
603 596
604#ifndef MEM_RSRC_PRINTK_SIZE 597#ifndef MEM_RSRC_PRINTK_SIZE
605#define MEM_RSRC_PRINTK_SIZE 8 598#define MEM_RSRC_PRINTK_SIZE 10
606#endif 599#endif
607 struct printf_spec num_spec = { 600 struct printf_spec hex_spec = {
608 .base = 16, 601 .base = 16,
609 .precision = -1, 602 .precision = -1,
610 .flags = SPECIAL | SMALL | ZEROPAD, 603 .flags = SPECIAL | SMALL | ZEROPAD,
611 }; 604 };
612 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ 605 struct printf_spec dec_spec = {
613 char sym[4*sizeof(resource_size_t) + 8]; 606 .base = 10,
607 .precision = -1,
608 .flags = 0,
609 };
610 struct printf_spec str_spec = {
611 .field_width = -1,
612 .precision = 10,
613 .flags = LEFT,
614 };
615 struct printf_spec flag_spec = {
616 .base = 16,
617 .precision = -1,
618 .flags = SPECIAL | SMALL,
619 };
620
621 /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
622 * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
623#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
624#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
625#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]")
626#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
627 char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
628 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
629
614 char *p = sym, *pend = sym + sizeof(sym); 630 char *p = sym, *pend = sym + sizeof(sym);
615 int size = -1; 631 int size = -1, addr = 0;
632 int decode = (fmt[0] == 'R') ? 1 : 0;
616 633
617 if (res->flags & IORESOURCE_IO) 634 if (res->flags & IORESOURCE_IO) {
618 size = IO_RSRC_PRINTK_SIZE; 635 size = IO_RSRC_PRINTK_SIZE;
619 else if (res->flags & IORESOURCE_MEM) 636 addr = 1;
637 } else if (res->flags & IORESOURCE_MEM) {
620 size = MEM_RSRC_PRINTK_SIZE; 638 size = MEM_RSRC_PRINTK_SIZE;
639 addr = 1;
640 }
621 641
622 *p++ = '['; 642 *p++ = '[';
623 num_spec.field_width = size; 643 if (res->flags & IORESOURCE_IO)
624 p = number(p, pend, res->start, num_spec); 644 p = string(p, pend, "io ", str_spec);
625 *p++ = '-'; 645 else if (res->flags & IORESOURCE_MEM)
626 p = number(p, pend, res->end, num_spec); 646 p = string(p, pend, "mem ", str_spec);
647 else if (res->flags & IORESOURCE_IRQ)
648 p = string(p, pend, "irq ", str_spec);
649 else if (res->flags & IORESOURCE_DMA)
650 p = string(p, pend, "dma ", str_spec);
651 else {
652 p = string(p, pend, "??? ", str_spec);
653 decode = 0;
654 }
655 hex_spec.field_width = size;
656 p = number(p, pend, res->start, addr ? hex_spec : dec_spec);
657 if (res->start != res->end) {
658 *p++ = '-';
659 p = number(p, pend, res->end, addr ? hex_spec : dec_spec);
660 }
661 if (decode) {
662 if (res->flags & IORESOURCE_MEM_64)
663 p = string(p, pend, " 64bit", str_spec);
664 if (res->flags & IORESOURCE_PREFETCH)
665 p = string(p, pend, " pref", str_spec);
666 if (res->flags & IORESOURCE_DISABLED)
667 p = string(p, pend, " disabled", str_spec);
668 } else {
669 p = string(p, pend, " flags ", str_spec);
670 p = number(p, pend, res->flags, flag_spec);
671 }
627 *p++ = ']'; 672 *p++ = ']';
628 *p = 0; 673 *p = '\0';
629 674
630 return string(buf, end, sym, spec); 675 return string(buf, end, sym, spec);
631} 676}
@@ -666,24 +711,26 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros)
666 if (i < 3) 711 if (i < 3)
667 *p++ = '.'; 712 *p++ = '.';
668 } 713 }
669
670 *p = '\0'; 714 *p = '\0';
715
671 return p; 716 return p;
672} 717}
673 718
674static char *ip6_compressed_string(char *p, const struct in6_addr *addr) 719static char *ip6_compressed_string(char *p, const char *addr)
675{ 720{
676 int i; 721 int i, j, range;
677 int j;
678 int range;
679 unsigned char zerolength[8]; 722 unsigned char zerolength[8];
680 int longest = 1; 723 int longest = 1;
681 int colonpos = -1; 724 int colonpos = -1;
682 u16 word; 725 u16 word;
683 u8 hi; 726 u8 hi, lo;
684 u8 lo;
685 bool needcolon = false; 727 bool needcolon = false;
686 bool useIPv4 = ipv6_addr_v4mapped(addr) || ipv6_addr_is_isatap(addr); 728 bool useIPv4;
729 struct in6_addr in6;
730
731 memcpy(&in6, addr, sizeof(struct in6_addr));
732
733 useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
687 734
688 memset(zerolength, 0, sizeof(zerolength)); 735 memset(zerolength, 0, sizeof(zerolength));
689 736
@@ -695,7 +742,7 @@ static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
695 /* find position of longest 0 run */ 742 /* find position of longest 0 run */
696 for (i = 0; i < range; i++) { 743 for (i = 0; i < range; i++) {
697 for (j = i; j < range; j++) { 744 for (j = i; j < range; j++) {
698 if (addr->s6_addr16[j] != 0) 745 if (in6.s6_addr16[j] != 0)
699 break; 746 break;
700 zerolength[i]++; 747 zerolength[i]++;
701 } 748 }
@@ -722,7 +769,7 @@ static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
722 needcolon = false; 769 needcolon = false;
723 } 770 }
724 /* hex u16 without leading 0s */ 771 /* hex u16 without leading 0s */
725 word = ntohs(addr->s6_addr16[i]); 772 word = ntohs(in6.s6_addr16[i]);
726 hi = word >> 8; 773 hi = word >> 8;
727 lo = word & 0xff; 774 lo = word & 0xff;
728 if (hi) { 775 if (hi) {
@@ -730,8 +777,9 @@ static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
730 p = pack_hex_byte(p, hi); 777 p = pack_hex_byte(p, hi);
731 else 778 else
732 *p++ = hex_asc_lo(hi); 779 *p++ = hex_asc_lo(hi);
780 p = pack_hex_byte(p, lo);
733 } 781 }
734 if (hi || lo > 0x0f) 782 else if (lo > 0x0f)
735 p = pack_hex_byte(p, lo); 783 p = pack_hex_byte(p, lo);
736 else 784 else
737 *p++ = hex_asc_lo(lo); 785 *p++ = hex_asc_lo(lo);
@@ -741,24 +789,25 @@ static char *ip6_compressed_string(char *p, const struct in6_addr *addr)
741 if (useIPv4) { 789 if (useIPv4) {
742 if (needcolon) 790 if (needcolon)
743 *p++ = ':'; 791 *p++ = ':';
744 p = ip4_string(p, &addr->s6_addr[12], false); 792 p = ip4_string(p, &in6.s6_addr[12], false);
745 } 793 }
746
747 *p = '\0'; 794 *p = '\0';
795
748 return p; 796 return p;
749} 797}
750 798
751static char *ip6_string(char *p, const struct in6_addr *addr, const char *fmt) 799static char *ip6_string(char *p, const char *addr, const char *fmt)
752{ 800{
753 int i; 801 int i;
802
754 for (i = 0; i < 8; i++) { 803 for (i = 0; i < 8; i++) {
755 p = pack_hex_byte(p, addr->s6_addr[2 * i]); 804 p = pack_hex_byte(p, *addr++);
756 p = pack_hex_byte(p, addr->s6_addr[2 * i + 1]); 805 p = pack_hex_byte(p, *addr++);
757 if (fmt[0] == 'I' && i != 7) 806 if (fmt[0] == 'I' && i != 7)
758 *p++ = ':'; 807 *p++ = ':';
759 } 808 }
760
761 *p = '\0'; 809 *p = '\0';
810
762 return p; 811 return p;
763} 812}
764 813
@@ -768,9 +817,9 @@ static char *ip6_addr_string(char *buf, char *end, const u8 *addr,
768 char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")]; 817 char ip6_addr[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255")];
769 818
770 if (fmt[0] == 'I' && fmt[2] == 'c') 819 if (fmt[0] == 'I' && fmt[2] == 'c')
771 ip6_compressed_string(ip6_addr, (const struct in6_addr *)addr); 820 ip6_compressed_string(ip6_addr, addr);
772 else 821 else
773 ip6_string(ip6_addr, (const struct in6_addr *)addr, fmt); 822 ip6_string(ip6_addr, addr, fmt);
774 823
775 return string(buf, end, ip6_addr, spec); 824 return string(buf, end, ip6_addr, spec);
776} 825}
@@ -785,6 +834,52 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
785 return string(buf, end, ip4_addr, spec); 834 return string(buf, end, ip4_addr, spec);
786} 835}
787 836
837static char *uuid_string(char *buf, char *end, const u8 *addr,
838 struct printf_spec spec, const char *fmt)
839{
840 char uuid[sizeof("xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")];
841 char *p = uuid;
842 int i;
843 static const u8 be[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
844 static const u8 le[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
845 const u8 *index = be;
846 bool uc = false;
847
848 switch (*(++fmt)) {
849 case 'L':
850 uc = true; /* fall-through */
851 case 'l':
852 index = le;
853 break;
854 case 'B':
855 uc = true;
856 break;
857 }
858
859 for (i = 0; i < 16; i++) {
860 p = pack_hex_byte(p, addr[index[i]]);
861 switch (i) {
862 case 3:
863 case 5:
864 case 7:
865 case 9:
866 *p++ = '-';
867 break;
868 }
869 }
870
871 *p = 0;
872
873 if (uc) {
874 p = uuid;
875 do {
876 *p = toupper(*p);
877 } while (*(++p));
878 }
879
880 return string(buf, end, uuid, spec);
881}
882
788/* 883/*
789 * Show a '%p' thing. A kernel extension is that the '%p' is followed 884 * Show a '%p' thing. A kernel extension is that the '%p' is followed
790 * by an extra set of alphanumeric characters that are extended format 885 * by an extra set of alphanumeric characters that are extended format
@@ -796,8 +891,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
796 * - 'f' For simple symbolic function names without offset 891 * - 'f' For simple symbolic function names without offset
797 * - 'S' For symbolic direct pointers with offset 892 * - 'S' For symbolic direct pointers with offset
798 * - 's' For symbolic direct pointers without offset 893 * - 's' For symbolic direct pointers without offset
799 * - 'R' For a struct resource pointer, it prints the range of 894 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
800 * addresses (not the name nor the flags) 895 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
801 * - 'M' For a 6-byte MAC address, it prints the address in the 896 * - 'M' For a 6-byte MAC address, it prints the address in the
802 * usual colon-separated hex notation 897 * usual colon-separated hex notation
803 * - 'm' For a 6-byte MAC address, it prints the hex address without colons 898 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
@@ -809,6 +904,18 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
809 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) 904 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
810 * - 'I6c' for IPv6 addresses printed as specified by 905 * - 'I6c' for IPv6 addresses printed as specified by
811 * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt 906 * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
907 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
908 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
909 * Options for %pU are:
910 * b big endian lower case hex (default)
911 * B big endian UPPER case hex
912 * l little endian lower case hex
913 * L little endian UPPER case hex
914 * big endian output byte order is:
915 * [0][1][2][3]-[4][5]-[6][7]-[8][9]-[10][11][12][13][14][15]
916 * little endian output byte order is:
917 * [3][2][1][0]-[5][4]-[7][6]-[8][9]-[10][11][12][13][14][15]
918 *
812 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 919 * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
813 * function pointers are really function descriptors, which contain a 920 * function pointers are really function descriptors, which contain a
814 * pointer to the real address. 921 * pointer to the real address.
@@ -823,12 +930,13 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
823 case 'F': 930 case 'F':
824 case 'f': 931 case 'f':
825 ptr = dereference_function_descriptor(ptr); 932 ptr = dereference_function_descriptor(ptr);
826 case 's':
827 /* Fallthrough */ 933 /* Fallthrough */
828 case 'S': 934 case 'S':
935 case 's':
829 return symbol_string(buf, end, ptr, spec, *fmt); 936 return symbol_string(buf, end, ptr, spec, *fmt);
830 case 'R': 937 case 'R':
831 return resource_string(buf, end, ptr, spec); 938 case 'r':
939 return resource_string(buf, end, ptr, spec, fmt);
832 case 'M': /* Colon separated: 00:01:02:03:04:05 */ 940 case 'M': /* Colon separated: 00:01:02:03:04:05 */
833 case 'm': /* Contiguous: 000102030405 */ 941 case 'm': /* Contiguous: 000102030405 */
834 return mac_address_string(buf, end, ptr, spec, fmt); 942 return mac_address_string(buf, end, ptr, spec, fmt);
@@ -848,6 +956,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
848 return ip4_addr_string(buf, end, ptr, spec, fmt); 956 return ip4_addr_string(buf, end, ptr, spec, fmt);
849 } 957 }
850 break; 958 break;
959 case 'U':
960 return uuid_string(buf, end, ptr, spec, fmt);
851 } 961 }
852 spec.flags |= SMALL; 962 spec.flags |= SMALL;
853 if (spec.field_width == -1) { 963 if (spec.field_width == -1) {
@@ -965,8 +1075,8 @@ precision:
965qualifier: 1075qualifier:
966 /* get the conversion qualifier */ 1076 /* get the conversion qualifier */
967 spec->qualifier = -1; 1077 spec->qualifier = -1;
968 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 1078 if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
969 *fmt == 'Z' || *fmt == 'z' || *fmt == 't') { 1079 TOLOWER(*fmt) == 'z' || *fmt == 't') {
970 spec->qualifier = *fmt++; 1080 spec->qualifier = *fmt++;
971 if (unlikely(spec->qualifier == *fmt)) { 1081 if (unlikely(spec->qualifier == *fmt)) {
972 if (spec->qualifier == 'l') { 1082 if (spec->qualifier == 'l') {
@@ -1033,7 +1143,7 @@ qualifier:
1033 spec->type = FORMAT_TYPE_LONG; 1143 spec->type = FORMAT_TYPE_LONG;
1034 else 1144 else
1035 spec->type = FORMAT_TYPE_ULONG; 1145 spec->type = FORMAT_TYPE_ULONG;
1036 } else if (spec->qualifier == 'Z' || spec->qualifier == 'z') { 1146 } else if (TOLOWER(spec->qualifier) == 'z') {
1037 spec->type = FORMAT_TYPE_SIZE_T; 1147 spec->type = FORMAT_TYPE_SIZE_T;
1038 } else if (spec->qualifier == 't') { 1148 } else if (spec->qualifier == 't') {
1039 spec->type = FORMAT_TYPE_PTRDIFF; 1149 spec->type = FORMAT_TYPE_PTRDIFF;
@@ -1086,8 +1196,7 @@ qualifier:
1086int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) 1196int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1087{ 1197{
1088 unsigned long long num; 1198 unsigned long long num;
1089 char *str, *end, c; 1199 char *str, *end;
1090 int read;
1091 struct printf_spec spec = {0}; 1200 struct printf_spec spec = {0};
1092 1201
1093 /* Reject out-of-range values early. Large positive sizes are 1202 /* Reject out-of-range values early. Large positive sizes are
@@ -1106,8 +1215,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1106 1215
1107 while (*fmt) { 1216 while (*fmt) {
1108 const char *old_fmt = fmt; 1217 const char *old_fmt = fmt;
1109 1218 int read = format_decode(fmt, &spec);
1110 read = format_decode(fmt, &spec);
1111 1219
1112 fmt += read; 1220 fmt += read;
1113 1221
@@ -1131,7 +1239,9 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1131 spec.precision = va_arg(args, int); 1239 spec.precision = va_arg(args, int);
1132 break; 1240 break;
1133 1241
1134 case FORMAT_TYPE_CHAR: 1242 case FORMAT_TYPE_CHAR: {
1243 char c;
1244
1135 if (!(spec.flags & LEFT)) { 1245 if (!(spec.flags & LEFT)) {
1136 while (--spec.field_width > 0) { 1246 while (--spec.field_width > 0) {
1137 if (str < end) 1247 if (str < end)
@@ -1150,6 +1260,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1150 ++str; 1260 ++str;
1151 } 1261 }
1152 break; 1262 break;
1263 }
1153 1264
1154 case FORMAT_TYPE_STR: 1265 case FORMAT_TYPE_STR:
1155 str = string(str, end, va_arg(args, char *), spec); 1266 str = string(str, end, va_arg(args, char *), spec);
@@ -1180,8 +1291,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
1180 if (qualifier == 'l') { 1291 if (qualifier == 'l') {
1181 long *ip = va_arg(args, long *); 1292 long *ip = va_arg(args, long *);
1182 *ip = (str - buf); 1293 *ip = (str - buf);
1183 } else if (qualifier == 'Z' || 1294 } else if (TOLOWER(qualifier) == 'z') {
1184 qualifier == 'z') {
1185 size_t *ip = va_arg(args, size_t *); 1295 size_t *ip = va_arg(args, size_t *);
1186 *ip = (str - buf); 1296 *ip = (str - buf);
1187 } else { 1297 } else {
@@ -1264,7 +1374,8 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
1264{ 1374{
1265 int i; 1375 int i;
1266 1376
1267 i=vsnprintf(buf,size,fmt,args); 1377 i = vsnprintf(buf, size, fmt, args);
1378
1268 return (i >= size) ? (size - 1) : i; 1379 return (i >= size) ? (size - 1) : i;
1269} 1380}
1270EXPORT_SYMBOL(vscnprintf); 1381EXPORT_SYMBOL(vscnprintf);
@@ -1283,14 +1394,15 @@ EXPORT_SYMBOL(vscnprintf);
1283 * 1394 *
1284 * See the vsnprintf() documentation for format string extensions over C99. 1395 * See the vsnprintf() documentation for format string extensions over C99.
1285 */ 1396 */
1286int snprintf(char * buf, size_t size, const char *fmt, ...) 1397int snprintf(char *buf, size_t size, const char *fmt, ...)
1287{ 1398{
1288 va_list args; 1399 va_list args;
1289 int i; 1400 int i;
1290 1401
1291 va_start(args, fmt); 1402 va_start(args, fmt);
1292 i=vsnprintf(buf,size,fmt,args); 1403 i = vsnprintf(buf, size, fmt, args);
1293 va_end(args); 1404 va_end(args);
1405
1294 return i; 1406 return i;
1295} 1407}
1296EXPORT_SYMBOL(snprintf); 1408EXPORT_SYMBOL(snprintf);
@@ -1306,7 +1418,7 @@ EXPORT_SYMBOL(snprintf);
1306 * the trailing '\0'. If @size is <= 0 the function returns 0. 1418 * the trailing '\0'. If @size is <= 0 the function returns 0.
1307 */ 1419 */
1308 1420
1309int scnprintf(char * buf, size_t size, const char *fmt, ...) 1421int scnprintf(char *buf, size_t size, const char *fmt, ...)
1310{ 1422{
1311 va_list args; 1423 va_list args;
1312 int i; 1424 int i;
@@ -1314,6 +1426,7 @@ int scnprintf(char * buf, size_t size, const char *fmt, ...)
1314 va_start(args, fmt); 1426 va_start(args, fmt);
1315 i = vsnprintf(buf, size, fmt, args); 1427 i = vsnprintf(buf, size, fmt, args);
1316 va_end(args); 1428 va_end(args);
1429
1317 return (i >= size) ? (size - 1) : i; 1430 return (i >= size) ? (size - 1) : i;
1318} 1431}
1319EXPORT_SYMBOL(scnprintf); 1432EXPORT_SYMBOL(scnprintf);
@@ -1351,14 +1464,15 @@ EXPORT_SYMBOL(vsprintf);
1351 * 1464 *
1352 * See the vsnprintf() documentation for format string extensions over C99. 1465 * See the vsnprintf() documentation for format string extensions over C99.
1353 */ 1466 */
1354int sprintf(char * buf, const char *fmt, ...) 1467int sprintf(char *buf, const char *fmt, ...)
1355{ 1468{
1356 va_list args; 1469 va_list args;
1357 int i; 1470 int i;
1358 1471
1359 va_start(args, fmt); 1472 va_start(args, fmt);
1360 i=vsnprintf(buf, INT_MAX, fmt, args); 1473 i = vsnprintf(buf, INT_MAX, fmt, args);
1361 va_end(args); 1474 va_end(args);
1475
1362 return i; 1476 return i;
1363} 1477}
1364EXPORT_SYMBOL(sprintf); 1478EXPORT_SYMBOL(sprintf);
@@ -1391,7 +1505,6 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
1391{ 1505{
1392 struct printf_spec spec = {0}; 1506 struct printf_spec spec = {0};
1393 char *str, *end; 1507 char *str, *end;
1394 int read;
1395 1508
1396 str = (char *)bin_buf; 1509 str = (char *)bin_buf;
1397 end = (char *)(bin_buf + size); 1510 end = (char *)(bin_buf + size);
@@ -1416,14 +1529,15 @@ do { \
1416 str += sizeof(type); \ 1529 str += sizeof(type); \
1417} while (0) 1530} while (0)
1418 1531
1419
1420 while (*fmt) { 1532 while (*fmt) {
1421 read = format_decode(fmt, &spec); 1533 int read = format_decode(fmt, &spec);
1422 1534
1423 fmt += read; 1535 fmt += read;
1424 1536
1425 switch (spec.type) { 1537 switch (spec.type) {
1426 case FORMAT_TYPE_NONE: 1538 case FORMAT_TYPE_NONE:
1539 case FORMAT_TYPE_INVALID:
1540 case FORMAT_TYPE_PERCENT_CHAR:
1427 break; 1541 break;
1428 1542
1429 case FORMAT_TYPE_WIDTH: 1543 case FORMAT_TYPE_WIDTH:
@@ -1438,13 +1552,14 @@ do { \
1438 case FORMAT_TYPE_STR: { 1552 case FORMAT_TYPE_STR: {
1439 const char *save_str = va_arg(args, char *); 1553 const char *save_str = va_arg(args, char *);
1440 size_t len; 1554 size_t len;
1555
1441 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE 1556 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
1442 || (unsigned long)save_str < PAGE_SIZE) 1557 || (unsigned long)save_str < PAGE_SIZE)
1443 save_str = "<NULL>"; 1558 save_str = "(null)";
1444 len = strlen(save_str); 1559 len = strlen(save_str) + 1;
1445 if (str + len + 1 < end) 1560 if (str + len < end)
1446 memcpy(str, save_str, len + 1); 1561 memcpy(str, save_str, len);
1447 str += len + 1; 1562 str += len;
1448 break; 1563 break;
1449 } 1564 }
1450 1565
@@ -1455,19 +1570,13 @@ do { \
1455 fmt++; 1570 fmt++;
1456 break; 1571 break;
1457 1572
1458 case FORMAT_TYPE_PERCENT_CHAR:
1459 break;
1460
1461 case FORMAT_TYPE_INVALID:
1462 break;
1463
1464 case FORMAT_TYPE_NRCHARS: { 1573 case FORMAT_TYPE_NRCHARS: {
1465 /* skip %n 's argument */ 1574 /* skip %n 's argument */
1466 int qualifier = spec.qualifier; 1575 int qualifier = spec.qualifier;
1467 void *skip_arg; 1576 void *skip_arg;
1468 if (qualifier == 'l') 1577 if (qualifier == 'l')
1469 skip_arg = va_arg(args, long *); 1578 skip_arg = va_arg(args, long *);
1470 else if (qualifier == 'Z' || qualifier == 'z') 1579 else if (TOLOWER(qualifier) == 'z')
1471 skip_arg = va_arg(args, size_t *); 1580 skip_arg = va_arg(args, size_t *);
1472 else 1581 else
1473 skip_arg = va_arg(args, int *); 1582 skip_arg = va_arg(args, int *);
@@ -1503,8 +1612,8 @@ do { \
1503 } 1612 }
1504 } 1613 }
1505 } 1614 }
1506 return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
1507 1615
1616 return (u32 *)(PTR_ALIGN(str, sizeof(u32))) - bin_buf;
1508#undef save_arg 1617#undef save_arg
1509} 1618}
1510EXPORT_SYMBOL_GPL(vbin_printf); 1619EXPORT_SYMBOL_GPL(vbin_printf);
@@ -1533,11 +1642,9 @@ EXPORT_SYMBOL_GPL(vbin_printf);
1533 */ 1642 */
1534int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) 1643int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1535{ 1644{
1536 unsigned long long num;
1537 char *str, *end, c;
1538 const char *args = (const char *)bin_buf;
1539
1540 struct printf_spec spec = {0}; 1645 struct printf_spec spec = {0};
1646 char *str, *end;
1647 const char *args = (const char *)bin_buf;
1541 1648
1542 if (WARN_ON_ONCE((int) size < 0)) 1649 if (WARN_ON_ONCE((int) size < 0))
1543 return 0; 1650 return 0;
@@ -1567,10 +1674,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1567 } 1674 }
1568 1675
1569 while (*fmt) { 1676 while (*fmt) {
1570 int read;
1571 const char *old_fmt = fmt; 1677 const char *old_fmt = fmt;
1572 1678 int read = format_decode(fmt, &spec);
1573 read = format_decode(fmt, &spec);
1574 1679
1575 fmt += read; 1680 fmt += read;
1576 1681
@@ -1594,7 +1699,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1594 spec.precision = get_arg(int); 1699 spec.precision = get_arg(int);
1595 break; 1700 break;
1596 1701
1597 case FORMAT_TYPE_CHAR: 1702 case FORMAT_TYPE_CHAR: {
1703 char c;
1704
1598 if (!(spec.flags & LEFT)) { 1705 if (!(spec.flags & LEFT)) {
1599 while (--spec.field_width > 0) { 1706 while (--spec.field_width > 0) {
1600 if (str < end) 1707 if (str < end)
@@ -1612,11 +1719,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1612 ++str; 1719 ++str;
1613 } 1720 }
1614 break; 1721 break;
1722 }
1615 1723
1616 case FORMAT_TYPE_STR: { 1724 case FORMAT_TYPE_STR: {
1617 const char *str_arg = args; 1725 const char *str_arg = args;
1618 size_t len = strlen(str_arg); 1726 args += strlen(str_arg) + 1;
1619 args += len + 1;
1620 str = string(str, end, (char *)str_arg, spec); 1727 str = string(str, end, (char *)str_arg, spec);
1621 break; 1728 break;
1622 } 1729 }
@@ -1628,11 +1735,6 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1628 break; 1735 break;
1629 1736
1630 case FORMAT_TYPE_PERCENT_CHAR: 1737 case FORMAT_TYPE_PERCENT_CHAR:
1631 if (str < end)
1632 *str = '%';
1633 ++str;
1634 break;
1635
1636 case FORMAT_TYPE_INVALID: 1738 case FORMAT_TYPE_INVALID:
1637 if (str < end) 1739 if (str < end)
1638 *str = '%'; 1740 *str = '%';
@@ -1643,15 +1745,15 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1643 /* skip */ 1745 /* skip */
1644 break; 1746 break;
1645 1747
1646 default: 1748 default: {
1749 unsigned long long num;
1750
1647 switch (spec.type) { 1751 switch (spec.type) {
1648 1752
1649 case FORMAT_TYPE_LONG_LONG: 1753 case FORMAT_TYPE_LONG_LONG:
1650 num = get_arg(long long); 1754 num = get_arg(long long);
1651 break; 1755 break;
1652 case FORMAT_TYPE_ULONG: 1756 case FORMAT_TYPE_ULONG:
1653 num = get_arg(unsigned long);
1654 break;
1655 case FORMAT_TYPE_LONG: 1757 case FORMAT_TYPE_LONG:
1656 num = get_arg(unsigned long); 1758 num = get_arg(unsigned long);
1657 break; 1759 break;
@@ -1681,8 +1783,9 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
1681 } 1783 }
1682 1784
1683 str = number(str, end, num, spec); 1785 str = number(str, end, num, spec);
1684 } 1786 } /* default: */
1685 } 1787 } /* switch(spec.type) */
1788 } /* while(*fmt) */
1686 1789
1687 if (size > 0) { 1790 if (size > 0) {
1688 if (str < end) 1791 if (str < end)
@@ -1716,6 +1819,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...)
1716 va_start(args, fmt); 1819 va_start(args, fmt);
1717 ret = vbin_printf(bin_buf, size, fmt, args); 1820 ret = vbin_printf(bin_buf, size, fmt, args);
1718 va_end(args); 1821 va_end(args);
1822
1719 return ret; 1823 return ret;
1720} 1824}
1721EXPORT_SYMBOL_GPL(bprintf); 1825EXPORT_SYMBOL_GPL(bprintf);
@@ -1728,27 +1832,23 @@ EXPORT_SYMBOL_GPL(bprintf);
1728 * @fmt: format of buffer 1832 * @fmt: format of buffer
1729 * @args: arguments 1833 * @args: arguments
1730 */ 1834 */
1731int vsscanf(const char * buf, const char * fmt, va_list args) 1835int vsscanf(const char *buf, const char *fmt, va_list args)
1732{ 1836{
1733 const char *str = buf; 1837 const char *str = buf;
1734 char *next; 1838 char *next;
1735 char digit; 1839 char digit;
1736 int num = 0; 1840 int num = 0;
1737 int qualifier; 1841 int qualifier, base, field_width;
1738 int base; 1842 bool is_sign;
1739 int field_width;
1740 int is_sign = 0;
1741 1843
1742 while(*fmt && *str) { 1844 while (*fmt && *str) {
1743 /* skip any white space in format */ 1845 /* skip any white space in format */
1744 /* white space in format matchs any amount of 1846 /* white space in format matchs any amount of
1745 * white space, including none, in the input. 1847 * white space, including none, in the input.
1746 */ 1848 */
1747 if (isspace(*fmt)) { 1849 if (isspace(*fmt)) {
1748 while (isspace(*fmt)) 1850 fmt = skip_spaces(++fmt);
1749 ++fmt; 1851 str = skip_spaces(str);
1750 while (isspace(*str))
1751 ++str;
1752 } 1852 }
1753 1853
1754 /* anything that is not a conversion must match exactly */ 1854 /* anything that is not a conversion must match exactly */
@@ -1761,12 +1861,12 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1761 if (!*fmt) 1861 if (!*fmt)
1762 break; 1862 break;
1763 ++fmt; 1863 ++fmt;
1764 1864
1765 /* skip this conversion. 1865 /* skip this conversion.
1766 * advance both strings to next white space 1866 * advance both strings to next white space
1767 */ 1867 */
1768 if (*fmt == '*') { 1868 if (*fmt == '*') {
1769 while (!isspace(*fmt) && *fmt) 1869 while (!isspace(*fmt) && *fmt != '%' && *fmt)
1770 fmt++; 1870 fmt++;
1771 while (!isspace(*str) && *str) 1871 while (!isspace(*str) && *str)
1772 str++; 1872 str++;
@@ -1780,8 +1880,8 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1780 1880
1781 /* get conversion qualifier */ 1881 /* get conversion qualifier */
1782 qualifier = -1; 1882 qualifier = -1;
1783 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || 1883 if (*fmt == 'h' || TOLOWER(*fmt) == 'l' ||
1784 *fmt == 'Z' || *fmt == 'z') { 1884 TOLOWER(*fmt) == 'z') {
1785 qualifier = *fmt++; 1885 qualifier = *fmt++;
1786 if (unlikely(qualifier == *fmt)) { 1886 if (unlikely(qualifier == *fmt)) {
1787 if (qualifier == 'h') { 1887 if (qualifier == 'h') {
@@ -1793,16 +1893,17 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1793 } 1893 }
1794 } 1894 }
1795 } 1895 }
1796 base = 10;
1797 is_sign = 0;
1798 1896
1799 if (!*fmt || !*str) 1897 if (!*fmt || !*str)
1800 break; 1898 break;
1801 1899
1802 switch(*fmt++) { 1900 base = 10;
1901 is_sign = 0;
1902
1903 switch (*fmt++) {
1803 case 'c': 1904 case 'c':
1804 { 1905 {
1805 char *s = (char *) va_arg(args,char*); 1906 char *s = (char *)va_arg(args, char*);
1806 if (field_width == -1) 1907 if (field_width == -1)
1807 field_width = 1; 1908 field_width = 1;
1808 do { 1909 do {
@@ -1813,17 +1914,15 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1813 continue; 1914 continue;
1814 case 's': 1915 case 's':
1815 { 1916 {
1816 char *s = (char *) va_arg(args, char *); 1917 char *s = (char *)va_arg(args, char *);
1817 if(field_width == -1) 1918 if (field_width == -1)
1818 field_width = INT_MAX; 1919 field_width = INT_MAX;
1819 /* first, skip leading white space in buffer */ 1920 /* first, skip leading white space in buffer */
1820 while (isspace(*str)) 1921 str = skip_spaces(str);
1821 str++;
1822 1922
1823 /* now copy until next white space */ 1923 /* now copy until next white space */
1824 while (*str && !isspace(*str) && field_width--) { 1924 while (*str && !isspace(*str) && field_width--)
1825 *s++ = *str++; 1925 *s++ = *str++;
1826 }
1827 *s = '\0'; 1926 *s = '\0';
1828 num++; 1927 num++;
1829 } 1928 }
@@ -1831,7 +1930,7 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1831 case 'n': 1930 case 'n':
1832 /* return number of characters read so far */ 1931 /* return number of characters read so far */
1833 { 1932 {
1834 int *i = (int *)va_arg(args,int*); 1933 int *i = (int *)va_arg(args, int*);
1835 *i = str - buf; 1934 *i = str - buf;
1836 } 1935 }
1837 continue; 1936 continue;
@@ -1843,14 +1942,14 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1843 base = 16; 1942 base = 16;
1844 break; 1943 break;
1845 case 'i': 1944 case 'i':
1846 base = 0; 1945 base = 0;
1847 case 'd': 1946 case 'd':
1848 is_sign = 1; 1947 is_sign = 1;
1849 case 'u': 1948 case 'u':
1850 break; 1949 break;
1851 case '%': 1950 case '%':
1852 /* looking for '%' in str */ 1951 /* looking for '%' in str */
1853 if (*str++ != '%') 1952 if (*str++ != '%')
1854 return num; 1953 return num;
1855 continue; 1954 continue;
1856 default: 1955 default:
@@ -1861,71 +1960,70 @@ int vsscanf(const char * buf, const char * fmt, va_list args)
1861 /* have some sort of integer conversion. 1960 /* have some sort of integer conversion.
1862 * first, skip white space in buffer. 1961 * first, skip white space in buffer.
1863 */ 1962 */
1864 while (isspace(*str)) 1963 str = skip_spaces(str);
1865 str++;
1866 1964
1867 digit = *str; 1965 digit = *str;
1868 if (is_sign && digit == '-') 1966 if (is_sign && digit == '-')
1869 digit = *(str + 1); 1967 digit = *(str + 1);
1870 1968
1871 if (!digit 1969 if (!digit
1872 || (base == 16 && !isxdigit(digit)) 1970 || (base == 16 && !isxdigit(digit))
1873 || (base == 10 && !isdigit(digit)) 1971 || (base == 10 && !isdigit(digit))
1874 || (base == 8 && (!isdigit(digit) || digit > '7')) 1972 || (base == 8 && (!isdigit(digit) || digit > '7'))
1875 || (base == 0 && !isdigit(digit))) 1973 || (base == 0 && !isdigit(digit)))
1876 break; 1974 break;
1877 1975
1878 switch(qualifier) { 1976 switch (qualifier) {
1879 case 'H': /* that's 'hh' in format */ 1977 case 'H': /* that's 'hh' in format */
1880 if (is_sign) { 1978 if (is_sign) {
1881 signed char *s = (signed char *) va_arg(args,signed char *); 1979 signed char *s = (signed char *)va_arg(args, signed char *);
1882 *s = (signed char) simple_strtol(str,&next,base); 1980 *s = (signed char)simple_strtol(str, &next, base);
1883 } else { 1981 } else {
1884 unsigned char *s = (unsigned char *) va_arg(args, unsigned char *); 1982 unsigned char *s = (unsigned char *)va_arg(args, unsigned char *);
1885 *s = (unsigned char) simple_strtoul(str, &next, base); 1983 *s = (unsigned char)simple_strtoul(str, &next, base);
1886 } 1984 }
1887 break; 1985 break;
1888 case 'h': 1986 case 'h':
1889 if (is_sign) { 1987 if (is_sign) {
1890 short *s = (short *) va_arg(args,short *); 1988 short *s = (short *)va_arg(args, short *);
1891 *s = (short) simple_strtol(str,&next,base); 1989 *s = (short)simple_strtol(str, &next, base);
1892 } else { 1990 } else {
1893 unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); 1991 unsigned short *s = (unsigned short *)va_arg(args, unsigned short *);
1894 *s = (unsigned short) simple_strtoul(str, &next, base); 1992 *s = (unsigned short)simple_strtoul(str, &next, base);
1895 } 1993 }
1896 break; 1994 break;
1897 case 'l': 1995 case 'l':
1898 if (is_sign) { 1996 if (is_sign) {
1899 long *l = (long *) va_arg(args,long *); 1997 long *l = (long *)va_arg(args, long *);
1900 *l = simple_strtol(str,&next,base); 1998 *l = simple_strtol(str, &next, base);
1901 } else { 1999 } else {
1902 unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); 2000 unsigned long *l = (unsigned long *)va_arg(args, unsigned long *);
1903 *l = simple_strtoul(str,&next,base); 2001 *l = simple_strtoul(str, &next, base);
1904 } 2002 }
1905 break; 2003 break;
1906 case 'L': 2004 case 'L':
1907 if (is_sign) { 2005 if (is_sign) {
1908 long long *l = (long long*) va_arg(args,long long *); 2006 long long *l = (long long *)va_arg(args, long long *);
1909 *l = simple_strtoll(str,&next,base); 2007 *l = simple_strtoll(str, &next, base);
1910 } else { 2008 } else {
1911 unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); 2009 unsigned long long *l = (unsigned long long *)va_arg(args, unsigned long long *);
1912 *l = simple_strtoull(str,&next,base); 2010 *l = simple_strtoull(str, &next, base);
1913 } 2011 }
1914 break; 2012 break;
1915 case 'Z': 2013 case 'Z':
1916 case 'z': 2014 case 'z':
1917 { 2015 {
1918 size_t *s = (size_t*) va_arg(args,size_t*); 2016 size_t *s = (size_t *)va_arg(args, size_t *);
1919 *s = (size_t) simple_strtoul(str,&next,base); 2017 *s = (size_t)simple_strtoul(str, &next, base);
1920 } 2018 }
1921 break; 2019 break;
1922 default: 2020 default:
1923 if (is_sign) { 2021 if (is_sign) {
1924 int *i = (int *) va_arg(args, int*); 2022 int *i = (int *)va_arg(args, int *);
1925 *i = (int) simple_strtol(str,&next,base); 2023 *i = (int)simple_strtol(str, &next, base);
1926 } else { 2024 } else {
1927 unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); 2025 unsigned int *i = (unsigned int *)va_arg(args, unsigned int*);
1928 *i = (unsigned int) simple_strtoul(str,&next,base); 2026 *i = (unsigned int)simple_strtoul(str, &next, base);
1929 } 2027 }
1930 break; 2028 break;
1931 } 2029 }
@@ -1956,14 +2054,15 @@ EXPORT_SYMBOL(vsscanf);
1956 * @fmt: formatting of buffer 2054 * @fmt: formatting of buffer
1957 * @...: resulting arguments 2055 * @...: resulting arguments
1958 */ 2056 */
1959int sscanf(const char * buf, const char * fmt, ...) 2057int sscanf(const char *buf, const char *fmt, ...)
1960{ 2058{
1961 va_list args; 2059 va_list args;
1962 int i; 2060 int i;
1963 2061
1964 va_start(args,fmt); 2062 va_start(args, fmt);
1965 i = vsscanf(buf,fmt,args); 2063 i = vsscanf(buf, fmt, args);
1966 va_end(args); 2064 va_end(args);
2065
1967 return i; 2066 return i;
1968} 2067}
1969EXPORT_SYMBOL(sscanf); 2068EXPORT_SYMBOL(sscanf);