author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-05-15 10:26:50 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2013-05-15 10:26:50 -0400
commit     12e04ffcd93b25dfd726d46338c2ee7d23de556e (patch)
tree       f91479a62805619168994fd3ee55e3ffa23fc24e /lib
parent     9eff37a8713939f218ab8bf0dc93f1d67af7b8b4 (diff)
parent     f722406faae2d073cc1d01063d1123c35425939e (diff)
Merge tag 'v3.10-rc1' into stable/for-linus-3.10
Linux 3.10-rc1

* tag 'v3.10-rc1': (12273 commits)
  Linux 3.10-rc1
  [SCSI] qla2xxx: Update firmware link in Kconfig file.
  [SCSI] iscsi class, qla4xxx: fix sess/conn refcounting when find fns are used
  [SCSI] sas: unify the pointlessly separated enums sas_dev_type and sas_device_type
  [SCSI] pm80xx: thermal, sas controller config and error handling update
  [SCSI] pm80xx: NCQ error handling changes
  [SCSI] pm80xx: WWN Modification for PM8081/88/89 controllers
  [SCSI] pm80xx: Changed module name and debug messages update
  [SCSI] pm80xx: Firmware flash memory free fix, with addition of new memory region for it
  [SCSI] pm80xx: SPC new firmware changes for device id 0x8081 alone
  [SCSI] pm80xx: Added SPCv/ve specific hardware functionalities and relevant changes in common files
  [SCSI] pm80xx: MSI-X implementation for using 64 interrupts
  [SCSI] pm80xx: Updated common functions common for SPC and SPCv/ve
  [SCSI] pm80xx: Multiple inbound/outbound queue configuration
  [SCSI] pm80xx: Added SPCv/ve specific ids, variables and modify for SPC
  [SCSI] lpfc: fix up Kconfig dependencies
  [SCSI] Handle MLQUEUE busy response in scsi_send_eh_cmnd
  dm cache: set config value
  dm cache: move config fns
  dm thin: generate event when metadata threshold passed
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                   3
-rw-r--r--  lib/Kconfig.debug            25
-rw-r--r--  lib/Makefile                  7
-rw-r--r--  lib/argv_split.c             87
-rw-r--r--  lib/decompress.c              2
-rw-r--r--  lib/dump_stack.c             11
-rw-r--r--  lib/dynamic_debug.c          49
-rw-r--r--  lib/fault-inject.c            2
-rw-r--r--  lib/genalloc.c               81
-rw-r--r--  lib/idr.c                    27
-rw-r--r--  lib/int_sqrt.c               32
-rw-r--r--  lib/kobject.c                 9
-rw-r--r--  lib/list_sort.c               2
-rw-r--r--  lib/lru_cache.c              56
-rw-r--r--  lib/notifier-error-inject.c   4
-rw-r--r--  lib/oid_registry.c            5
-rw-r--r--  lib/rbtree_test.c             9
-rw-r--r--  lib/rwsem-spinlock.c         38
-rw-r--r--  lib/rwsem.c                 242
-rw-r--r--  lib/scatterlist.c             4
-rw-r--r--  lib/show_mem.c                3
-rw-r--r--  lib/string_helpers.c        133
-rw-r--r--  lib/swiotlb.c                19
-rw-r--r--  lib/test-string_helpers.c   103
-rw-r--r--  lib/ucs2_string.c            51
-rw-r--r--  lib/usercopy.c                9
-rw-r--r--  lib/uuid.c                    8
-rw-r--r--  lib/vsprintf.c               18
28 files changed, 744 insertions, 295 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 3958dc4389f9..fe01d418b09a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -404,4 +404,7 @@ config OID_REGISTRY
 	help
 	  Enable fast lookup object identifier registry.
 
+config UCS2_STRING
+	tristate
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 28be08c09bab..566cf2bc08ea 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1192,7 +1192,7 @@ config MEMORY_NOTIFIER_ERROR_INJECT
 	  bash: echo: write error: Cannot allocate memory
 
 	  To compile this code as a module, choose M here: the module will
-	  be called pSeries-reconfig-notifier-error-inject.
+	  be called memory-notifier-error-inject.
 
 	  If unsure, say N.
 
@@ -1209,7 +1209,7 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT
 	  notified, write the error code to "actions/<notifier event>/error".
 
 	  To compile this code as a module, choose M here: the module will
-	  be called memory-notifier-error-inject.
+	  be called of-reconfig-notifier-error-inject.
 
 	  If unsure, say N.
 
@@ -1292,6 +1292,24 @@ config LATENCYTOP
 	  Enable this option if you want to use the LatencyTOP tool
 	  to find out which userspace is blocking on what kernel operations.
 
+config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	bool
+
+config DEBUG_STRICT_USER_COPY_CHECKS
+	bool "Strict user copy size checks"
+	depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+	help
+	  Enabling this option turns a certain set of sanity checks for user
+	  copy operations into compile time failures.
+
+	  The copy_from_user() etc checks are there to help test if there
+	  are sufficient security checks on the length argument of
+	  the copy operation, by having gcc prove that the argument is
+	  within bounds.
+
+	  If unsure, say N.
+
 source mm/Kconfig.debug
 source kernel/trace/Kconfig
 
@@ -1463,5 +1481,8 @@ source "lib/Kconfig.kgdb"
 
 source "lib/Kconfig.kmemcheck"
 
+config TEST_STRING_HELPERS
+	tristate "Test functions located in the string_helpers module at runtime"
+
 config TEST_KSTRTOX
 	tristate "Test kstrto*() family of functions at runtime"
diff --git a/lib/Makefile b/lib/Makefile
index d7946ff75b2e..e9c52e1b853a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -15,6 +15,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o
 
+obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
@@ -22,8 +23,10 @@ lib-y += kobject.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
+	 gcd.o lcm.o list_sort.o uuid.o flex_array.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
+obj-y += string_helpers.o
+obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
@@ -174,3 +177,5 @@ quiet_cmd_build_OID_registry = GEN $@
       cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
 
 clean-files += oid_registry_data.c
+
+obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 1e9a6cbc3689..e927ed0e18a8 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -8,23 +8,17 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 
-static const char *skip_arg(const char *cp)
-{
-	while (*cp && !isspace(*cp))
-		cp++;
-
-	return cp;
-}
-
 static int count_argc(const char *str)
 {
 	int count = 0;
+	bool was_space;
 
-	while (*str) {
-		str = skip_spaces(str);
-		if (*str) {
+	for (was_space = true; *str; str++) {
+		if (isspace(*str)) {
+			was_space = true;
+		} else if (was_space) {
+			was_space = false;
 			count++;
-			str = skip_arg(str);
 		}
 	}
 
@@ -39,10 +33,8 @@ static int count_argc(const char *str)
  */
 void argv_free(char **argv)
 {
-	char **p;
-	for (p = argv; *p; p++)
-		kfree(*p);
-
+	argv--;
+	kfree(argv[0]);
 	kfree(argv);
 }
 EXPORT_SYMBOL(argv_free);
@@ -59,43 +51,44 @@ EXPORT_SYMBOL(argv_free);
  * considered to be a single argument separator. The returned array
  * is always NULL-terminated. Returns NULL on memory allocation
  * failure.
+ *
+ * The source string at `str' may be undergoing concurrent alteration via
+ * userspace sysctl activity (at least). The argv_split() implementation
+ * attempts to handle this gracefully by taking a local copy to work on.
  */
 char **argv_split(gfp_t gfp, const char *str, int *argcp)
 {
-	int argc = count_argc(str);
-	char **argv = kzalloc(sizeof(*argv) * (argc+1), gfp);
-	char **argvp;
-
-	if (argv == NULL)
-		goto out;
-
-	if (argcp)
-		*argcp = argc;
-
-	argvp = argv;
-
-	while (*str) {
-		str = skip_spaces(str);
-
-		if (*str) {
-			const char *p = str;
-			char *t;
-
-			str = skip_arg(str);
+	char *argv_str;
+	bool was_space;
+	char **argv, **argv_ret;
+	int argc;
+
+	argv_str = kstrndup(str, KMALLOC_MAX_SIZE - 1, gfp);
+	if (!argv_str)
+		return NULL;
+
+	argc = count_argc(argv_str);
+	argv = kmalloc(sizeof(*argv) * (argc + 2), gfp);
+	if (!argv) {
+		kfree(argv_str);
+		return NULL;
+	}
 
-			t = kstrndup(p, str-p, gfp);
-			if (t == NULL)
-				goto fail;
-			*argvp++ = t;
+	*argv = argv_str;
+	argv_ret = ++argv;
+	for (was_space = true; *argv_str; argv_str++) {
+		if (isspace(*argv_str)) {
+			was_space = true;
+			*argv_str = 0;
+		} else if (was_space) {
+			was_space = false;
+			*argv++ = argv_str;
 		}
 	}
-	*argvp = NULL;
-
- out:
-	return argv;
+	*argv = NULL;
 
- fail:
-	argv_free(argv);
-	return NULL;
+	if (argcp)
+		*argcp = argc;
+	return argv_ret;
 }
 EXPORT_SYMBOL(argv_split);
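
The rewritten argv_split() tokenizes a kstrndup() copy in place and stashes that copy in the slot just before the returned array, which is why argv_free() can simply free argv[-1] and the vector itself. A minimal usage sketch, assuming a kernel context; the caller name is illustrative:

    /* Hypothetical caller: tokenize a whitespace-separated command line. */
    #include <linux/string.h>
    #include <linux/slab.h>

    static int run_command(const char *cmdline)
    {
            int argc;
            char **argv;

            argv = argv_split(GFP_KERNEL, cmdline, &argc);
            if (!argv)
                    return -ENOMEM;         /* copy or vector allocation failed */

            if (argc)
                    pr_debug("%d args, argv[0]='%s'\n", argc, argv[0]);

            argv_free(argv);        /* frees the duplicated string and the array */
            return 0;
    }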
diff --git a/lib/decompress.c b/lib/decompress.c
index 31a804277282..f8fdedaf7b3d 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -38,7 +38,7 @@ struct compress_format {
 	decompress_fn decompressor;
 };
 
-static const struct compress_format compressed_formats[] __initdata = {
+static const struct compress_format compressed_formats[] __initconst = {
 	{ {037, 0213}, "gzip", gunzip },
 	{ {037, 0236}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 42f4f55c9458..53bad099ebd6 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -5,11 +5,16 @@
 
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <linux/sched.h>
 
+/**
+ * dump_stack - dump the current task information and its stack trace
+ *
+ * Architectures can override this implementation by implementing its own.
+ */
 void dump_stack(void)
 {
-	printk(KERN_NOTICE
-	       "This architecture does not implement dump_stack()\n");
+	dump_stack_print_info(KERN_DEFAULT);
+	show_stack(NULL, NULL);
 }
-
 EXPORT_SYMBOL(dump_stack);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 5276b99ca650..99fec3ae405a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -24,6 +24,7 @@
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
+#include <linux/string_helpers.h>
 #include <linux/uaccess.h>
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
@@ -276,48 +277,6 @@ static inline int parse_lineno(const char *str, unsigned int *val)
 	return 0;
 }
 
-/*
- * Undo octal escaping in a string, inplace. This is useful to
- * allow the user to express a query which matches a format
- * containing embedded spaces.
- */
-#define isodigit(c)	((c) >= '0' && (c) <= '7')
-static char *unescape(char *str)
-{
-	char *in = str;
-	char *out = str;
-
-	while (*in) {
-		if (*in == '\\') {
-			if (in[1] == '\\') {
-				*out++ = '\\';
-				in += 2;
-				continue;
-			} else if (in[1] == 't') {
-				*out++ = '\t';
-				in += 2;
-				continue;
-			} else if (in[1] == 'n') {
-				*out++ = '\n';
-				in += 2;
-				continue;
-			} else if (isodigit(in[1]) &&
-				   isodigit(in[2]) &&
-				   isodigit(in[3])) {
-				*out++ = (((in[1] - '0') << 6) |
-					  ((in[2] - '0') << 3) |
-					  (in[3] - '0'));
-				in += 4;
-				continue;
-			}
-		}
-		*out++ = *in++;
-	}
-	*out = '\0';
-
-	return str;
-}
-
 static int check_set(const char **dest, char *src, char *name)
 {
 	int rc = 0;
@@ -371,8 +330,10 @@ static int ddebug_parse_query(char *words[], int nwords,
 		} else if (!strcmp(words[i], "module")) {
 			rc = check_set(&query->module, words[i+1], "module");
 		} else if (!strcmp(words[i], "format")) {
-			rc = check_set(&query->format, unescape(words[i+1]),
-				       "format");
+			string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
+							    UNESCAPE_OCTAL |
+							    UNESCAPE_SPECIAL);
+			rc = check_set(&query->format, words[i+1], "format");
 		} else if (!strcmp(words[i], "line")) {
 			char *first = words[i+1];
 			char *last = strchr(first, '-');
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index f7210ad6cffd..c5c7a762b850 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -122,7 +122,7 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
 		return false;
 	}
 
-	if (attr->probability <= random32() % 100)
+	if (attr->probability <= prandom_u32() % 100)
 		return false;
 
 	if (!fail_stacktrace(attr))
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 54920433705a..b35cfa9bc3d4 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -34,6 +34,8 @@
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/genalloc.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
 
 static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
 {
@@ -480,3 +482,82 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
 	return start_bit;
 }
 EXPORT_SYMBOL(gen_pool_best_fit);
+
+static void devm_gen_pool_release(struct device *dev, void *res)
+{
+	gen_pool_destroy(*(struct gen_pool **)res);
+}
+
+/**
+ * devm_gen_pool_create - managed gen_pool_create
+ * @dev: device that provides the gen_pool
+ * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
+ * @nid: node id of the node the pool structure should be allocated on, or -1
+ *
+ * Create a new special memory pool that can be used to manage special purpose
+ * memory not managed by the regular kmalloc/kfree interface. The pool will be
+ * automatically destroyed by the device management code.
+ */
+struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
+		int nid)
+{
+	struct gen_pool **ptr, *pool;
+
+	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
+
+	pool = gen_pool_create(min_alloc_order, nid);
+	if (pool) {
+		*ptr = pool;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return pool;
+}
+
+/**
+ * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
+ * @dev: device to retrieve the gen_pool from
+ * @name: Optional name for the gen_pool, usually NULL
+ *
+ * Returns the gen_pool for the device if one is present, or NULL.
+ */
+struct gen_pool *dev_get_gen_pool(struct device *dev)
+{
+	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
+					  NULL);
+
+	if (!p)
+		return NULL;
+	return *p;
+}
+EXPORT_SYMBOL_GPL(dev_get_gen_pool);
+
+#ifdef CONFIG_OF
+/**
+ * of_get_named_gen_pool - find a pool by phandle property
+ * @np: device node
+ * @propname: property name containing phandle(s)
+ * @index: index into the phandle array
+ *
+ * Returns the pool that contains the chunk starting at the physical
+ * address of the device tree node pointed at by the phandle property,
+ * or NULL if not found.
+ */
+struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+	const char *propname, int index)
+{
+	struct platform_device *pdev;
+	struct device_node *np_pool;
+
+	np_pool = of_parse_phandle(np, propname, index);
+	if (!np_pool)
+		return NULL;
+	pdev = of_find_device_by_node(np_pool);
+	if (!pdev)
+		return NULL;
+	return dev_get_gen_pool(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
+#endif /* CONFIG_OF */
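
devm_gen_pool_create() ties the pool's lifetime to the device, so a driver never calls gen_pool_destroy() itself. A minimal probe-time sketch; the SRAM address and sizes are hypothetical:

    #include <linux/genalloc.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct gen_pool *pool;
            unsigned long vaddr;

            /* min_alloc_order 5: one bitmap bit per 32-byte granule */
            pool = devm_gen_pool_create(&pdev->dev, 5, -1);
            if (!pool)
                    return -ENOMEM;

            if (gen_pool_add(pool, 0x10000000, 4096, -1))   /* hypothetical SRAM */
                    return -ENOMEM;

            vaddr = gen_pool_alloc(pool, 256);
            if (!vaddr)
                    return -ENOMEM;

            /* no cleanup needed: devres destroys the pool on detach */
            return 0;
    }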
diff --git a/lib/idr.c b/lib/idr.c
index 322e2816f2fb..cca4b9302a71 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -495,6 +495,33 @@ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(idr_alloc);
 
+/**
+ * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
+ * @idr: the (initialized) idr
+ * @ptr: pointer to be associated with the new id
+ * @start: the minimum id (inclusive)
+ * @end: the maximum id (exclusive, <= 0 for max)
+ * @gfp_mask: memory allocation flags
+ *
+ * Essentially the same as idr_alloc, but prefers to allocate progressively
+ * higher ids if it can. If the "cur" counter wraps, then it will start again
+ * at the "start" end of the range and allocate one that has already been used.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
+			gfp_t gfp_mask)
+{
+	int id;
+
+	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
+	if (id == -ENOSPC)
+		id = idr_alloc(idr, ptr, start, end, gfp_mask);
+
+	if (likely(id >= 0))
+		idr->cur = id + 1;
+	return id;
+}
+EXPORT_SYMBOL(idr_alloc_cyclic);
+
 static void idr_remove_warning(int id)
 {
 	printk(KERN_WARNING
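
idr_alloc_cyclic() keeps a cursor so recently freed ids are not handed out again immediately, which is what protocols with transaction or session ids usually want. A minimal sketch; the names are hypothetical:

    #include <linux/idr.h>

    static DEFINE_IDR(session_idr);

    /* Returns a new session id in [1, 100), or a negative errno. */
    static int new_session(void *session)
    {
            /* prefers ids above the last one handed out; wraps back to 1 */
            return idr_alloc_cyclic(&session_idr, session, 1, 100, GFP_KERNEL);
    }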
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index fc2eeb7cb2ea..1ef4cc344977 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -1,3 +1,9 @@
+/*
+ * Copyright (C) 2013 Davidlohr Bueso <davidlohr.bueso@hp.com>
+ *
+ * Based on the shift-and-subtract algorithm for computing integer
+ * square root from Guy L. Steele.
+ */
 
 #include <linux/kernel.h>
 #include <linux/export.h>
@@ -10,23 +16,23 @@
  */
 unsigned long int_sqrt(unsigned long x)
 {
-	unsigned long op, res, one;
+	unsigned long b, m, y = 0;
 
-	op = x;
-	res = 0;
+	if (x <= 1)
+		return x;
 
-	one = 1UL << (BITS_PER_LONG - 2);
-	while (one > op)
-		one >>= 2;
+	m = 1UL << (BITS_PER_LONG - 2);
+	while (m != 0) {
+		b = y + m;
+		y >>= 1;
 
-	while (one != 0) {
-		if (op >= res + one) {
-			op = op - (res + one);
-			res = res + 2 * one;
+		if (x >= b) {
+			x -= b;
+			y += m;
 		}
-		res /= 2;
-		one /= 4;
+		m >>= 2;
 	}
-	return res;
+
+	return y;
 }
 EXPORT_SYMBOL(int_sqrt);
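
The replacement loop is the classic shift-and-subtract method: m walks down the even bit positions and each pass decides one bit of the root. A user-space rendering of the same loop, for experimentation (a sketch, not kernel code):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long isqrt(unsigned long x)
    {
            unsigned long b, m, y = 0;

            if (x <= 1)
                    return x;

            m = 1UL << (BITS_PER_LONG - 2);
            while (m != 0) {
                    b = y + m;      /* candidate: current root with next bit set */
                    y >>= 1;

                    if (x >= b) {   /* the bit belongs in the root */
                            x -= b;
                            y += m;
                    }
                    m >>= 2;        /* move to the next lower even bit position */
            }

            return y;
    }

    int main(void)
    {
            /* prints "4 4 3": isqrt(17), isqrt(16), isqrt(15) */
            printf("%lu %lu %lu\n", isqrt(17), isqrt(16), isqrt(15));
            return 0;
    }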
diff --git a/lib/kobject.c b/lib/kobject.c
index e07ee1fcd6f1..b7e29a6056d3 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,6 +529,13 @@ struct kobject *kobject_get(struct kobject *kobj)
 	return kobj;
 }
 
+static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
+{
+	if (!kref_get_unless_zero(&kobj->kref))
+		kobj = NULL;
+	return kobj;
+}
+
 /*
  * kobject_cleanup - free kobject resources.
  * @kobj: object to cleanup
@@ -751,7 +758,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
 
 	list_for_each_entry(k, &kset->list, entry) {
 		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
-			ret = kobject_get(k);
+			ret = kobject_get_unless_zero(k);
 			break;
 		}
 	}
diff --git a/lib/list_sort.c b/lib/list_sort.c
index d7325c6b103f..1183fa70a44d 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -229,7 +229,7 @@ static int __init list_sort_test(void)
 			goto exit;
 		}
 		/* force some equivalencies */
-		el->value = random32() % (TEST_LIST_LEN/3);
+		el->value = prandom_u32() % (TEST_LIST_LEN / 3);
 		el->serial = i;
 		el->poison1 = TEST_POISON1;
 		el->poison2 = TEST_POISON2;
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 8335d39d2ccd..4a83ecd03650 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -365,7 +365,13 @@ static int lc_unused_element_available(struct lru_cache *lc)
 	return 0;
 }
 
-static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool may_change)
+/* used as internal flags to __lc_get */
+enum {
+	LC_GET_MAY_CHANGE = 1,
+	LC_GET_MAY_USE_UNCOMMITTED = 2,
+};
+
+static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
 {
 	struct lc_element *e;
 
@@ -380,22 +386,31 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool
 	 * this enr is currently being pulled in already,
 	 * and will be available once the pending transaction
 	 * has been committed. */
-	if (e && e->lc_new_number == e->lc_number) {
+	if (e) {
+		if (e->lc_new_number != e->lc_number) {
+			/* It has been found above, but on the "to_be_changed"
+			 * list, not yet committed. Don't pull it in twice,
+			 * wait for the transaction, then try again...
+			 */
+			if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
+				RETURN(NULL);
+			/* ... unless the caller is aware of the implications,
+			 * probably preparing a cumulative transaction. */
+			++e->refcnt;
+			++lc->hits;
+			RETURN(e);
+		}
+		/* else: lc_new_number == lc_number; a real hit. */
 		++lc->hits;
 		if (e->refcnt++ == 0)
 			lc->used++;
 		list_move(&e->list, &lc->in_use); /* Not evictable... */
 		RETURN(e);
 	}
+	/* e == NULL */
 
 	++lc->misses;
-	if (!may_change)
-		RETURN(NULL);
-
-	/* It has been found above, but on the "to_be_changed" list, not yet
-	 * committed. Don't pull it in twice, wait for the transaction, then
-	 * try again */
-	if (e)
+	if (!(flags & LC_GET_MAY_CHANGE))
 		RETURN(NULL);
 
 	/* To avoid races with lc_try_lock(), first, mark us dirty
@@ -477,7 +492,27 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool
  */
 struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
 {
-	return __lc_get(lc, enr, 1);
+	return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
+}
+
+/**
+ * lc_get_cumulative - like lc_get; also finds to-be-changed elements
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Unlike lc_get this also returns the element for @enr, if it is belonging to
+ * a pending transaction, so the return values are like for lc_get(),
+ * plus:
+ *
+ * pointer to an element already on the "to_be_changed" list.
+ * In this case, the cache was already marked %LC_DIRTY.
+ *
+ * Caller needs to make sure that the pending transaction is completed,
+ * before proceeding to actually use this element.
+ */
+struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
+{
+	return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
 }
 
 /**
@@ -648,3 +683,4 @@ EXPORT_SYMBOL(lc_seq_printf_stats);
 EXPORT_SYMBOL(lc_seq_dump_details);
 EXPORT_SYMBOL(lc_try_lock);
 EXPORT_SYMBOL(lc_is_used);
+EXPORT_SYMBOL(lc_get_cumulative);
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 44b92cb6224f..eb4a04afea80 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -17,7 +17,7 @@ static int debugfs_errno_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
 			"%lld\n");
 
-static struct dentry *debugfs_create_errno(const char *name, mode_t mode,
+static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
 					   struct dentry *parent, int *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_errno);
@@ -50,7 +50,7 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
 		struct notifier_err_inject *err_inject, int priority)
 {
 	struct notifier_err_inject_action *action;
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 	struct dentry *dir;
 	struct dentry *actions_dir;
 
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index d8de11f45908..318f382a010d 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -9,6 +9,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#include <linux/module.h>
 #include <linux/export.h>
 #include <linux/oid_registry.h>
 #include <linux/kernel.h>
@@ -16,6 +17,10 @@
 #include <linux/bug.h>
 #include "oid_registry_data.c"
 
+MODULE_DESCRIPTION("OID Registry");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
 /**
  * look_up_OID - Find an OID registration for the specified data
  * @data: Binary representation of the OID
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index af38aedbd874..122f02f9941b 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -117,8 +117,7 @@ static int black_path_count(struct rb_node *rb)
 static void check(int nr_nodes)
 {
 	struct rb_node *rb;
-	int count = 0;
-	int blacks = 0;
+	int count = 0, blacks = 0;
 	u32 prev_key = 0;
 
 	for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
@@ -134,7 +133,9 @@ static void check(int nr_nodes)
 		prev_key = node->key;
 		count++;
 	}
+
 	WARN_ON_ONCE(count != nr_nodes);
+	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
 }
 
 static void check_augmented(int nr_nodes)
@@ -148,7 +149,7 @@ static void check_augmented(int nr_nodes)
 	}
 }
 
-static int rbtree_test_init(void)
+static int __init rbtree_test_init(void)
 {
 	int i, j;
 	cycles_t time1, time2, time;
@@ -221,7 +222,7 @@ static int rbtree_test_init(void)
 	return -EAGAIN; /* Fail will directly unload the module */
 }
 
-static void rbtree_test_exit(void)
+static void __exit rbtree_test_exit(void)
 {
 	printk(KERN_ALERT "test exit\n");
 }
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 7542afbb22b3..9be8a9144978 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -9,12 +9,15 @@
 #include <linux/sched.h>
 #include <linux/export.h>
 
+enum rwsem_waiter_type {
+	RWSEM_WAITING_FOR_WRITE,
+	RWSEM_WAITING_FOR_READ
+};
+
 struct rwsem_waiter {
 	struct list_head list;
 	struct task_struct *task;
-	unsigned int flags;
-#define RWSEM_WAITING_FOR_READ	0x00000001
-#define RWSEM_WAITING_FOR_WRITE	0x00000002
+	enum rwsem_waiter_type type;
 };
 
 int rwsem_is_locked(struct rw_semaphore *sem)
@@ -67,26 +70,17 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
-	if (!wakewrite) {
-		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
-			goto out;
-		goto dont_wake_writers;
-	}
-
-	/*
-	 * as we support write lock stealing, we can't set sem->activity
-	 * to -1 here to indicate we get the lock. Instead, we wake it up
-	 * to let it go get it again.
-	 */
-	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
-		wake_up_process(waiter->task);
+	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
+		if (wakewrite)
+			/* Wake up a writer. Note that we do not grant it the
+			 * lock - it will have to acquire it when it runs. */
+			wake_up_process(waiter->task);
 		goto out;
 	}
 
 	/* grant an infinite number of read locks to the front of the queue */
- dont_wake_writers:
 	woken = 0;
-	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
+	do {
 		struct list_head *next = waiter->list.next;
 
 		list_del(&waiter->list);
@@ -96,10 +90,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 		wake_up_process(tsk);
 		put_task_struct(tsk);
 		woken++;
-		if (list_empty(&sem->wait_list))
+		if (next == &sem->wait_list)
 			break;
 		waiter = list_entry(next, struct rwsem_waiter, list);
-	}
+	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
 
 	sem->activity += woken;
 
@@ -144,7 +138,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 
 	/* set up my own style of waitqueue */
 	waiter.task = tsk;
-	waiter.flags = RWSEM_WAITING_FOR_READ;
+	waiter.type = RWSEM_WAITING_FOR_READ;
 	get_task_struct(tsk);
 
 	list_add_tail(&waiter.list, &sem->wait_list);
@@ -201,7 +195,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	/* set up my own style of waitqueue */
 	tsk = current;
 	waiter.task = tsk;
-	waiter.flags = RWSEM_WAITING_FOR_WRITE;
+	waiter.type = RWSEM_WAITING_FOR_WRITE;
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* wait for someone to release the lock */
diff --git a/lib/rwsem.c b/lib/rwsem.c
index ad5e0df16ab4..19c5fa95e0b4 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -4,6 +4,7 @@
  * Derived from arch/i386/kernel/semaphore.c
  *
  * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
+ * and Michel Lespinasse <walken@google.com>
  */
 #include <linux/rwsem.h>
 #include <linux/sched.h>
@@ -30,21 +31,22 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 
 EXPORT_SYMBOL(__init_rwsem);
 
+enum rwsem_waiter_type {
+	RWSEM_WAITING_FOR_WRITE,
+	RWSEM_WAITING_FOR_READ
+};
+
 struct rwsem_waiter {
 	struct list_head list;
 	struct task_struct *task;
-	unsigned int flags;
-#define RWSEM_WAITING_FOR_READ	0x00000001
-#define RWSEM_WAITING_FOR_WRITE	0x00000002
+	enum rwsem_waiter_type type;
 };
 
-/* Wake types for __rwsem_do_wake(). Note that RWSEM_WAKE_NO_ACTIVE and
- * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
- * since the rwsem value was observed.
- */
-#define RWSEM_WAKE_ANY        0 /* Wake whatever's at head of wait list */
-#define RWSEM_WAKE_NO_ACTIVE  1 /* rwsem was observed with no active thread */
-#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */
+enum rwsem_wake_type {
+	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
+	RWSEM_WAKE_READERS,	/* Wake readers only */
+	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
+};
 
 /*
  * handle the lock release when processes blocked on it that can now run
@@ -57,46 +59,43 @@ struct rwsem_waiter {
  * - writers are only woken if downgrading is false
  */
 static struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
+__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
 {
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 	struct list_head *next;
-	signed long woken, loop, adjustment;
+	long oldcount, woken, loop, adjustment;
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
-		goto readers_only;
-
-	if (wake_type == RWSEM_WAKE_READ_OWNED)
-		/* Another active reader was observed, so wakeup is not
-		 * likely to succeed. Save the atomic op.
-		 */
+	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
+		if (wake_type == RWSEM_WAKE_ANY)
+			/* Wake writer at the front of the queue, but do not
+			 * grant it the lock yet as we want other writers
+			 * to be able to steal it. Readers, on the other hand,
+			 * will block as they will notice the queued writer.
+			 */
+			wake_up_process(waiter->task);
 		goto out;
+	}
 
-	/* Wake up the writing waiter and let the task grab the sem: */
-	wake_up_process(waiter->task);
-	goto out;
-
- readers_only:
-	/* If we come here from up_xxxx(), another thread might have reached
-	 * rwsem_down_failed_common() before we acquired the spinlock and
-	 * woken up a waiter, making it now active. We prefer to check for
-	 * this first in order to not spend too much time with the spinlock
-	 * held if we're not going to be able to wake up readers in the end.
-	 *
-	 * Note that we do not need to update the rwsem count: any writer
-	 * trying to acquire rwsem will run rwsem_down_write_failed() due
-	 * to the waiting threads and block trying to acquire the spinlock.
-	 *
-	 * We use a dummy atomic update in order to acquire the cache line
-	 * exclusively since we expect to succeed and run the final rwsem
-	 * count adjustment pretty soon.
+	/* Writers might steal the lock before we grant it to the next reader.
+	 * We prefer to do the first reader grant before counting readers
+	 * so we can bail out early if a writer stole the lock.
 	 */
-	if (wake_type == RWSEM_WAKE_ANY &&
-	    rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
-		/* Someone grabbed the sem for write already */
-		goto out;
+	adjustment = 0;
+	if (wake_type != RWSEM_WAKE_READ_OWNED) {
+		adjustment = RWSEM_ACTIVE_READ_BIAS;
+ try_reader_grant:
+		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
+			/* A writer stole the lock. Undo our reader grant. */
+			if (rwsem_atomic_update(-adjustment, sem) &
+						RWSEM_ACTIVE_MASK)
+				goto out;
+			/* Last active locker left. Retry waking readers. */
+			goto try_reader_grant;
+		}
+	}
 
 	/* Grant an infinite number of read locks to the readers at the front
 	 * of the queue. Note we increment the 'active part' of the count by
@@ -112,17 +111,19 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 		waiter = list_entry(waiter->list.next,
 					struct rwsem_waiter, list);
 
-	} while (waiter->flags & RWSEM_WAITING_FOR_READ);
+	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
 
-	adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
-	if (waiter->flags & RWSEM_WAITING_FOR_READ)
+	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
 		/* hit end of list above */
 		adjustment -= RWSEM_WAITING_BIAS;
 
-	rwsem_atomic_add(adjustment, sem);
+	if (adjustment)
+		rwsem_atomic_add(adjustment, sem);
 
 	next = sem->wait_list.next;
-	for (loop = woken; loop > 0; loop--) {
+	loop = woken;
+	do {
 		waiter = list_entry(next, struct rwsem_waiter, list);
 		next = waiter->list.next;
 		tsk = waiter->task;
@@ -130,7 +131,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 		waiter->task = NULL;
 		wake_up_process(tsk);
 		put_task_struct(tsk);
-	}
+	} while (--loop);
 
 	sem->wait_list.next = next;
 	next->prev = &sem->wait_list;
@@ -139,60 +140,21 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 	return sem;
 }
 
-/* Try to get write sem, caller holds sem->wait_lock: */
-static int try_get_writer_sem(struct rw_semaphore *sem,
-					struct rwsem_waiter *waiter)
-{
-	struct rwsem_waiter *fwaiter;
-	long oldcount, adjustment;
-
-	/* only steal when first waiter is writing */
-	fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	if (!(fwaiter->flags & RWSEM_WAITING_FOR_WRITE))
-		return 0;
-
-	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
-	/* Only one waiter in the queue: */
-	if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
-		adjustment -= RWSEM_WAITING_BIAS;
-
-try_again_write:
-	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
-	if (!(oldcount & RWSEM_ACTIVE_MASK)) {
-		/* No active lock: */
-		struct task_struct *tsk = waiter->task;
-
-		list_del(&waiter->list);
-		smp_mb();
-		put_task_struct(tsk);
-		tsk->state = TASK_RUNNING;
-		return 1;
-	}
-	/* some one grabbed the sem already */
-	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
-		return 0;
-	goto try_again_write;
-}
-
 /*
- * wait for a lock to be granted
+ * wait for the read lock to be granted
 */
-static struct rw_semaphore __sched *
-rwsem_down_failed_common(struct rw_semaphore *sem,
-			 unsigned int flags, signed long adjustment)
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
+	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk = current;
-	signed long count;
-
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	raw_spin_lock_irq(&sem->wait_lock);
 	waiter.task = tsk;
-	waiter.flags = flags;
+	waiter.type = RWSEM_WAITING_FOR_READ;
 	get_task_struct(tsk);
 
+	raw_spin_lock_irq(&sem->wait_lock);
 	if (list_empty(&sem->wait_list))
 		adjustment += RWSEM_WAITING_BIAS;
 	list_add_tail(&waiter.list, &sem->wait_list);
@@ -200,35 +162,24 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	/* we're now waiting on the lock, but no longer actively locking */
 	count = rwsem_atomic_update(adjustment, sem);
 
-	/* If there are no active locks, wake the front queued process(es) up.
+	/* If there are no active locks, wake the front queued process(es).
 	 *
-	 * Alternatively, if we're called from a failed down_write(), there
-	 * were already threads queued before us and there are no active
-	 * writers, the lock must be read owned; so we try to wake any read
-	 * locks that were queued ahead of us. */
-	if (count == RWSEM_WAITING_BIAS)
-		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
-	else if (count > RWSEM_WAITING_BIAS &&
-		 adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
-		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+	 * If there are no writers and we are first in the queue,
+	 * wake our own waiter to join the existing active readers !
	 */
+	if (count == RWSEM_WAITING_BIAS ||
+	    (count > RWSEM_WAITING_BIAS &&
+	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
 
 	raw_spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
-	for (;;) {
+	while (true) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (!waiter.task)
 			break;
-
-		raw_spin_lock_irq(&sem->wait_lock);
-		/* Try to get the writer sem, may steal from the head writer: */
-		if (flags == RWSEM_WAITING_FOR_WRITE)
-			if (try_get_writer_sem(sem, &waiter)) {
-				raw_spin_unlock_irq(&sem->wait_lock);
-				return sem;
-			}
-		raw_spin_unlock_irq(&sem->wait_lock);
 		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	}
 
 	tsk->state = TASK_RUNNING;
@@ -237,21 +188,64 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 }
 
 /*
- * wait for the read lock to be granted
- */
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
-					-RWSEM_ACTIVE_READ_BIAS);
-}
-
-/*
- * wait for the write lock to be granted
+ * wait until we successfully acquire the write lock
 */
 struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
-	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
-					-RWSEM_ACTIVE_WRITE_BIAS);
+	long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
+	struct rwsem_waiter waiter;
+	struct task_struct *tsk = current;
+
+	/* set up my own style of waitqueue */
+	waiter.task = tsk;
+	waiter.type = RWSEM_WAITING_FOR_WRITE;
+
+	raw_spin_lock_irq(&sem->wait_lock);
+	if (list_empty(&sem->wait_list))
+		adjustment += RWSEM_WAITING_BIAS;
+	list_add_tail(&waiter.list, &sem->wait_list);
+
+	/* we're now waiting on the lock, but no longer actively locking */
+	count = rwsem_atomic_update(adjustment, sem);
+
+	/* If there were already threads queued before us and there are no
+	 * active writers, the lock must be read owned; so we try to wake
+	 * any read locks that were queued ahead of us. */
+	if (count > RWSEM_WAITING_BIAS &&
+	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
+
+	/* wait until we successfully acquire the lock */
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	while (true) {
+		if (!(count & RWSEM_ACTIVE_MASK)) {
+			/* Try acquiring the write lock. */
+			count = RWSEM_ACTIVE_WRITE_BIAS;
+			if (!list_is_singular(&sem->wait_list))
+				count += RWSEM_WAITING_BIAS;
+
+			if (sem->count == RWSEM_WAITING_BIAS &&
+			    cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
+							RWSEM_WAITING_BIAS)
+				break;
+		}
+
+		raw_spin_unlock_irq(&sem->wait_lock);
+
+		/* Block until there are no active lockers. */
+		do {
+			schedule();
+			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
+
+		raw_spin_lock_irq(&sem->wait_lock);
+	}
+
+	list_del(&waiter.list);
+	raw_spin_unlock_irq(&sem->wait_lock);
+	tsk->state = TASK_RUNNING;
+
+	return sem;
 }
 
 /*
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b83c144d731f..a1cf8cae60e7 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -401,7 +401,6 @@ void __sg_page_iter_start(struct sg_page_iter *piter,
 	piter->__pg_advance = 0;
 	piter->__nents = nents;
 
-	piter->page = NULL;
 	piter->sg = sglist;
 	piter->sg_pgoffset = pgoffset;
 }
@@ -426,7 +425,6 @@ bool __sg_page_iter_next(struct sg_page_iter *piter)
 		if (!--piter->__nents || !piter->sg)
 			return false;
 	}
-	piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
 
 	return true;
 }
@@ -496,7 +494,7 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
 		miter->__remaining = min_t(unsigned long, miter->__remaining,
 					   PAGE_SIZE - miter->__offset);
 	}
-	miter->page = miter->piter.page;
+	miter->page = sg_page_iter_page(&miter->piter);
 	miter->consumed = miter->length = miter->__remaining;
 
 	if (miter->__flags & SG_MITER_ATOMIC)
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 4407f8c9b1f7..b7c72311ad0c 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -18,6 +18,9 @@ void show_mem(unsigned int filter)
 	printk("Mem-Info:\n");
 	show_free_areas(filter);
 
+	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+		return;
+
 	for_each_online_pgdat(pgdat) {
 		unsigned long i, flags;
 
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 1cffc223bff5..ed5c1454dd62 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -2,10 +2,12 @@
 * Helpers for formatting and printing strings
 *
 * Copyright 31 August 2008 James Bottomley
+ * Copyright (C) 2013, Intel Corporation
 */
 #include <linux/kernel.h>
 #include <linux/math64.h>
 #include <linux/export.h>
+#include <linux/ctype.h>
 #include <linux/string_helpers.h>
 
 /**
@@ -66,3 +68,134 @@ int string_get_size(u64 size, const enum string_size_units units,
 	return 0;
 }
 EXPORT_SYMBOL(string_get_size);
+
+static bool unescape_space(char **src, char **dst)
+{
+	char *p = *dst, *q = *src;
+
+	switch (*q) {
+	case 'n':
+		*p = '\n';
+		break;
+	case 'r':
+		*p = '\r';
+		break;
+	case 't':
+		*p = '\t';
+		break;
+	case 'v':
+		*p = '\v';
+		break;
+	case 'f':
+		*p = '\f';
+		break;
+	default:
+		return false;
+	}
+	*dst += 1;
+	*src += 1;
+	return true;
+}
+
+static bool unescape_octal(char **src, char **dst)
+{
+	char *p = *dst, *q = *src;
+	u8 num;
+
+	if (isodigit(*q) == 0)
+		return false;
+
+	num = (*q++) & 7;
+	while (num < 32 && isodigit(*q) && (q - *src < 3)) {
+		num <<= 3;
+		num += (*q++) & 7;
+	}
+	*p = num;
+	*dst += 1;
+	*src = q;
+	return true;
+}
+
+static bool unescape_hex(char **src, char **dst)
+{
+	char *p = *dst, *q = *src;
+	int digit;
+	u8 num;
+
+	if (*q++ != 'x')
+		return false;
+
+	num = digit = hex_to_bin(*q++);
+	if (digit < 0)
+		return false;
+
+	digit = hex_to_bin(*q);
+	if (digit >= 0) {
+		q++;
+		num = (num << 4) | digit;
+	}
+	*p = num;
+	*dst += 1;
+	*src = q;
+	return true;
+}
+
+static bool unescape_special(char **src, char **dst)
+{
+	char *p = *dst, *q = *src;
+
+	switch (*q) {
+	case '\"':
+		*p = '\"';
+		break;
+	case '\\':
+		*p = '\\';
+		break;
+	case 'a':
+		*p = '\a';
+		break;
+	case 'e':
+		*p = '\e';
+		break;
+	default:
+		return false;
+	}
+	*dst += 1;
+	*src += 1;
+	return true;
+}
+
+int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
+{
+	char *out = dst;
+
+	while (*src && --size) {
+		if (src[0] == '\\' && src[1] != '\0' && size > 1) {
+			src++;
+			size--;
+
+			if (flags & UNESCAPE_SPACE &&
+					unescape_space(&src, &out))
+				continue;
+
+			if (flags & UNESCAPE_OCTAL &&
+					unescape_octal(&src, &out))
+				continue;
+
+			if (flags & UNESCAPE_HEX &&
+					unescape_hex(&src, &out))
+				continue;
+
+			if (flags & UNESCAPE_SPECIAL &&
+					unescape_special(&src, &out))
+				continue;
+
+			*out++ = '\\';
+		}
+		*out++ = *src++;
+	}
+	*out = '\0';
+
+	return out - dst;
+}
+EXPORT_SYMBOL(string_unescape);
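
string_unescape() and its *_inplace wrappers expand literal backslash escapes according to the flag mask, which is exactly how dynamic_debug uses them above. A minimal sketch with a hypothetical buffer:

    #include <linux/string_helpers.h>

    static void demo(void)
    {
            char buf[] = "a\\x20b\\n";      /* literal backslash escapes */
            int len;

            len = string_unescape_inplace(buf, UNESCAPE_HEX | UNESCAPE_SPACE);
            /* buf is now "a b\n" (a real space and newline), len == 4 */
    }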
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index bfe02b8fc55b..d23762e6652c 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -105,9 +105,9 @@ setup_io_tlb_npages(char *str)
 	if (!strcmp(str, "force"))
 		swiotlb_force = 1;
 
-	return 1;
+	return 0;
 }
-__setup("swiotlb=", setup_io_tlb_npages);
+early_param("swiotlb", setup_io_tlb_npages);
 /* make io_tlb_overflow tunable too? */
 
 unsigned long swiotlb_nr_tbl(void)
@@ -115,6 +115,18 @@ unsigned long swiotlb_nr_tbl(void)
 	return io_tlb_nslabs;
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
+unsigned long swiotlb_size_or_default(void)
+{
+	unsigned long size;
+
+	size = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	return size ? size : (IO_TLB_DEFAULT_SIZE);
+}
+
 /* Note that this doesn't work with highmem page */
 static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
 				      volatile void *address)
@@ -188,8 +200,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 void __init
 swiotlb_init(int verbose)
 {
-	/* default to 64MB */
-	size_t default_size = 64UL<<20;
+	size_t default_size = IO_TLB_DEFAULT_SIZE;
 	unsigned char *vstart;
 	unsigned long bytes;
 
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
new file mode 100644
index 000000000000..6ac48de04c0e
--- /dev/null
+++ b/lib/test-string_helpers.c
@@ -0,0 +1,103 @@
+/*
+ * Test cases for lib/string_helpers.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+
+struct test_string {
+	const char *in;
+	const char *out;
+	unsigned int flags;
+};
+
+static const struct test_string strings[] __initconst = {
+	{
+		.in = "\\f\\ \\n\\r\\t\\v",
+		.out = "\f\\ \n\r\t\v",
+		.flags = UNESCAPE_SPACE,
+	},
+	{
+		.in = "\\40\\1\\387\\0064\\05\\040\\8a\\110\\777",
+		.out = " \001\00387\0064\005 \\8aH?7",
+		.flags = UNESCAPE_OCTAL,
+	},
+	{
+		.in = "\\xv\\xa\\x2c\\xD\\x6f2",
+		.out = "\\xv\n,\ro2",
+		.flags = UNESCAPE_HEX,
+	},
+	{
+		.in = "\\h\\\\\\\"\\a\\e\\",
+		.out = "\\h\\\"\a\e\\",
+		.flags = UNESCAPE_SPECIAL,
+	},
+};
+
+static void __init test_string_unescape(unsigned int flags, bool inplace)
+{
+	char in[256];
+	char out_test[256];
+	char out_real[256];
+	int i, p = 0, q_test = 0, q_real = sizeof(out_real);
+
+	for (i = 0; i < ARRAY_SIZE(strings); i++) {
+		const char *s = strings[i].in;
+		int len = strlen(strings[i].in);
+
+		/* Copy string to in buffer */
+		memcpy(&in[p], s, len);
+		p += len;
+
+		/* Copy expected result for given flags */
+		if (flags & strings[i].flags) {
+			s = strings[i].out;
+			len = strlen(strings[i].out);
+		}
+		memcpy(&out_test[q_test], s, len);
+		q_test += len;
+	}
+	in[p++] = '\0';
+
+	/* Call string_unescape and compare result */
+	if (inplace) {
+		memcpy(out_real, in, p);
+		if (flags == UNESCAPE_ANY)
+			q_real = string_unescape_any_inplace(out_real);
+		else
+			q_real = string_unescape_inplace(out_real, flags);
+	} else if (flags == UNESCAPE_ANY) {
+		q_real = string_unescape_any(in, out_real, q_real);
+	} else {
+		q_real = string_unescape(in, out_real, q_real, flags);
+	}
+
+	if (q_real != q_test || memcmp(out_test, out_real, q_test)) {
+		pr_warn("Test failed: flags = %u\n", flags);
+		print_hex_dump(KERN_WARNING, "Input: ",
+			       DUMP_PREFIX_NONE, 16, 1, in, p - 1, true);
+		print_hex_dump(KERN_WARNING, "Expected: ",
+			       DUMP_PREFIX_NONE, 16, 1, out_test, q_test, true);
+		print_hex_dump(KERN_WARNING, "Got: ",
+			       DUMP_PREFIX_NONE, 16, 1, out_real, q_real, true);
+	}
+}
+
+static int __init test_string_helpers_init(void)
+{
+	unsigned int i;
+
+	pr_info("Running tests...\n");
+	for (i = 0; i < UNESCAPE_ANY + 1; i++)
+		test_string_unescape(i, false);
+	test_string_unescape(get_random_int() % (UNESCAPE_ANY + 1), true);
+
+	return -EINVAL;
+}
+module_init(test_string_helpers_init);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
new file mode 100644
index 000000000000..6f500ef2301d
--- /dev/null
+++ b/lib/ucs2_string.c
@@ -0,0 +1,51 @@
+#include <linux/ucs2_string.h>
+#include <linux/module.h>
+
+/* Return the number of unicode characters in data */
+unsigned long
+ucs2_strnlen(const ucs2_char_t *s, size_t maxlength)
+{
+	unsigned long length = 0;
+
+	while (*s++ != 0 && length < maxlength)
+		length++;
+	return length;
+}
+EXPORT_SYMBOL(ucs2_strnlen);
+
+unsigned long
+ucs2_strlen(const ucs2_char_t *s)
+{
+	return ucs2_strnlen(s, ~0UL);
+}
+EXPORT_SYMBOL(ucs2_strlen);
+
+/*
+ * Return the number of bytes is the length of this string
+ * Note: this is NOT the same as the number of unicode characters
+ */
+unsigned long
+ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
+{
+	return ucs2_strnlen(data, maxlength/sizeof(ucs2_char_t)) * sizeof(ucs2_char_t);
+}
+EXPORT_SYMBOL(ucs2_strsize);
+
+int
+ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+{
+	while (1) {
+		if (len == 0)
+			return 0;
+		if (*a < *b)
+			return -1;
+		if (*a > *b)
+			return 1;
+		if (*a == 0) /* implies *b == 0 */
+			return 0;
+		a++;
+		b++;
+		len--;
+	}
+}
+EXPORT_SYMBOL(ucs2_strncmp);
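
The UCS-2 helpers mirror their C-string counterparts but count 16-bit characters; ucs2_strsize() returns the byte count, which is what EFI variable code needs. A small sketch with a hypothetical string:

    #include <linux/ucs2_string.h>

    static const ucs2_char_t name[] = { 'B', 'o', 'o', 't', 0 };

    static void demo(void)
    {
            /* prints "chars=4 bytes=8": the NUL is not counted */
            pr_info("chars=%lu bytes=%lu\n",
                    ucs2_strlen(name), ucs2_strsize(name, sizeof(name)));
    }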
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 000000000000..4f5b1ddbcd25
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,9 @@
+#include <linux/export.h>
+#include <linux/bug.h>
+#include <linux/uaccess.h>
+
+void copy_from_user_overflow(void)
+{
+	WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/lib/uuid.c b/lib/uuid.c
index 52a6fe6387de..398821e4dce1 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -25,13 +25,7 @@
 
 static void __uuid_gen_common(__u8 b[16])
 {
-	int i;
-	u32 r;
-
-	for (i = 0; i < 4; i++) {
-		r = random32();
-		memcpy(b + i * 4, &r, 4);
-	}
+	prandom_bytes(b, 16);
 	/* reversion 0b10 */
 	b[8] = (b[8] & 0x3F) | 0x80;
 }
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0d62fd700f68..e149c6416384 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -534,14 +534,21 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
 
 static noinline_for_stack
 char *symbol_string(char *buf, char *end, void *ptr,
-		    struct printf_spec spec, char ext)
+		    struct printf_spec spec, const char *fmt)
 {
-	unsigned long value = (unsigned long) ptr;
+	unsigned long value;
 #ifdef CONFIG_KALLSYMS
 	char sym[KSYM_SYMBOL_LEN];
-	if (ext == 'B')
+#endif
+
+	if (fmt[1] == 'R')
+		ptr = __builtin_extract_return_addr(ptr);
+	value = (unsigned long)ptr;
+
+#ifdef CONFIG_KALLSYMS
+	if (*fmt == 'B')
 		sprint_backtrace(sym, value);
-	else if (ext != 'f' && ext != 's')
+	else if (*fmt != 'f' && *fmt != 's')
 		sprint_symbol(sym, value);
 	else
 		sprint_symbol_no_offset(sym, value);
@@ -987,6 +994,7 @@ int kptr_restrict __read_mostly;
 * - 'f' For simple symbolic function names without offset
 * - 'S' For symbolic direct pointers with offset
 * - 's' For symbolic direct pointers without offset
+ * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
 * - 'B' For backtraced symbolic direct pointers with offset
 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
@@ -1060,7 +1068,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	case 'S':
 	case 's':
 	case 'B':
-		return symbol_string(buf, end, ptr, spec, *fmt);
+		return symbol_string(buf, end, ptr, spec, fmt);
 	case 'R':
 	case 'r':
 		return resource_string(buf, end, ptr, spec, fmt);
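
The new 'R' modifier matters on architectures whose ABI decorates return addresses: the value from __builtin_return_address() must be translated before symbol lookup. A minimal sketch of a caller:

    /* %pSR runs the pointer through __builtin_extract_return_addr()
     * before symbolizing it, as the patch above implements. */
    static void who_called_me(void)
    {
            printk(KERN_DEBUG "called from %pSR\n",
                   __builtin_return_address(0));
    }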