Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug            25
-rw-r--r--  lib/Makefile                  5
-rw-r--r--  lib/decompress.c              2
-rw-r--r--  lib/div64.c                  19
-rw-r--r--  lib/dump_stack.c             11
-rw-r--r--  lib/dynamic_debug.c          48
-rw-r--r--  lib/kobject.c                 2
-rw-r--r--  lib/lru_cache.c              56
-rw-r--r--  lib/notifier-error-inject.c   4
-rw-r--r--  lib/oid_registry.c            5
-rw-r--r--  lib/rbtree_test.c             9
-rw-r--r--  lib/rwsem-spinlock.c         38
-rw-r--r--  lib/rwsem.c                 242
-rw-r--r--  lib/scatterlist.c             4
-rw-r--r--  lib/string_helpers.c        133
-rw-r--r--  lib/test-string_helpers.c   103
-rw-r--r--  lib/usercopy.c                9
-rw-r--r--  lib/vsprintf.c               18
18 files changed, 499 insertions, 234 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 28be08c09bab..566cf2bc08ea 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1192,7 +1192,7 @@ config MEMORY_NOTIFIER_ERROR_INJECT
 	  bash: echo: write error: Cannot allocate memory
 
 	  To compile this code as a module, choose M here: the module will
-	  be called pSeries-reconfig-notifier-error-inject.
+	  be called memory-notifier-error-inject.
 
 	  If unsure, say N.
 
@@ -1209,7 +1209,7 @@ config OF_RECONFIG_NOTIFIER_ERROR_INJECT
 	  notified, write the error code to "actions/<notifier event>/error".
 
 	  To compile this code as a module, choose M here: the module will
-	  be called memory-notifier-error-inject.
+	  be called of-reconfig-notifier-error-inject.
 
 	  If unsure, say N.
 
@@ -1292,6 +1292,24 @@ config LATENCYTOP
 	  Enable this option if you want to use the LatencyTOP tool
 	  to find out which userspace is blocking on what kernel operations.
 
+config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	bool
+
+config DEBUG_STRICT_USER_COPY_CHECKS
+	bool "Strict user copy size checks"
+	depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+	depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
+	help
+	  Enabling this option turns a certain set of sanity checks for user
+	  copy operations into compile time failures.
+
+	  The copy_from_user() etc checks are there to help test if there
+	  are sufficient security checks on the length argument of
+	  the copy operation, by having gcc prove that the argument is
+	  within bounds.
+
+	  If unsure, say N.
+
 source mm/Kconfig.debug
 source kernel/trace/Kconfig
 
@@ -1463,5 +1481,8 @@ source "lib/Kconfig.kgdb"
 
 source "lib/Kconfig.kmemcheck"
 
+config TEST_STRING_HELPERS
+	tristate "Test functions located in the string_helpers module at runtime"
+
 config TEST_KSTRTOX
 	tristate "Test kstrto*() family of functions at runtime"
diff --git a/lib/Makefile b/lib/Makefile
index 6e2cc561f761..e9c52e1b853a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -15,6 +15,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o
 
+obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 
@@ -22,8 +23,10 @@ lib-y += kobject.o klist.o
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
+	 gcd.o lcm.o list_sort.o uuid.o flex_array.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
+obj-y += string_helpers.o
+obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
diff --git a/lib/decompress.c b/lib/decompress.c
index 31a804277282..f8fdedaf7b3d 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -38,7 +38,7 @@ struct compress_format {
 	decompress_fn decompressor;
 };
 
-static const struct compress_format compressed_formats[] __initdata = {
+static const struct compress_format compressed_formats[] __initconst = {
 	{ {037, 0213}, "gzip", gunzip },
 	{ {037, 0236}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
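
The __initdata → __initconst switch is needed because the array is declared
const: const objects belong in .init.rodata, and annotating them with the
non-const init section can provoke gcc "section type conflict" errors. A
minimal illustration of the distinction (identifiers here are illustrative
only, not from this patch):

	static const char banner[] __initconst = "booting";  /* .init.rodata */
	static int bootstrap_tries __initdata = 3;           /* .init.data  */

Both are discarded when the init sections are freed after boot.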
diff --git a/lib/div64.c b/lib/div64.c
index 3af5728d95fd..a163b6caef73 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -79,10 +79,9 @@ EXPORT_SYMBOL(div_s64_rem);
 #endif
 
 /**
- * div64_u64_rem - unsigned 64bit divide with 64bit divisor and 64bit remainder
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
  * @dividend: 64bit dividend
  * @divisor: 64bit divisor
- * @remainder: 64bit remainder
  *
  * This implementation is a modified version of the algorithm proposed
  * by the book 'Hacker's Delight'. The original source and full proof
@@ -90,33 +89,27 @@ EXPORT_SYMBOL(div_s64_rem);
  *
  * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
  */
-#ifndef div64_u64_rem
-u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
 {
 	u32 high = divisor >> 32;
 	u64 quot;
 
 	if (high == 0) {
-		u32 rem32;
-		quot = div_u64_rem(dividend, divisor, &rem32);
-		*remainder = rem32;
+		quot = div_u64(dividend, divisor);
 	} else {
 		int n = 1 + fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);
 
 		if (quot != 0)
 			quot--;
-
-		*remainder = dividend - quot * divisor;
-		if (*remainder >= divisor) {
+		if ((dividend - quot * divisor) >= divisor)
 			quot++;
-			*remainder -= divisor;
-		}
 	}
 
 	return quot;
 }
-EXPORT_SYMBOL(div64_u64_rem);
+EXPORT_SYMBOL(div64_u64);
 #endif
 
 /**
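
The restored div64_u64() normalizes the divisor so it fits in 32 bits,
divides the similarly shifted dividend, and then corrects the estimate: after
the initial decrement the quotient never exceeds the true value and is at
most one too small, so a single conditional increment suffices. A standalone
userspace sketch of the same algorithm, assuming a nonzero divisor (fls32()
is a portable stand-in for the kernel's fls()):

	#include <stdint.h>

	static int fls32(uint32_t x)	/* index of highest set bit, 1-based */
	{
		int n = 0;
		while (x) {
			n++;
			x >>= 1;
		}
		return n;
	}

	static uint64_t div64_u64_sketch(uint64_t dividend, uint64_t divisor)
	{
		uint32_t high = divisor >> 32;
		uint64_t quot;

		if (high == 0) {
			quot = dividend / divisor;	/* divisor fits in 32 bits */
		} else {
			int n = 1 + fls32(high);	/* divisor >> n fits in 32 bits */
			quot = (dividend >> n) / (divisor >> n);
			if (quot != 0)
				quot--;			/* now never too large... */
			if ((dividend - quot * divisor) >= divisor)
				quot++;			/* ...and at most one too small */
		}
		return quot;
	}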
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index 42f4f55c9458..53bad099ebd6 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -5,11 +5,16 @@
 
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <linux/sched.h>
 
+/**
+ * dump_stack - dump the current task information and its stack trace
+ *
+ * Architectures can override this implementation by implementing its own.
+ */
 void dump_stack(void)
 {
-	printk(KERN_NOTICE
-	       "This architecture does not implement dump_stack()\n");
+	dump_stack_print_info(KERN_DEFAULT);
+	show_stack(NULL, NULL);
 }
-
 EXPORT_SYMBOL(dump_stack);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 46032453abd5..99fec3ae405a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -24,6 +24,7 @@
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
+#include <linux/string_helpers.h>
 #include <linux/uaccess.h>
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
@@ -276,47 +277,6 @@ static inline int parse_lineno(const char *str, unsigned int *val)
 	return 0;
 }
 
-/*
- * Undo octal escaping in a string, inplace. This is useful to
- * allow the user to express a query which matches a format
- * containing embedded spaces.
- */
-static char *unescape(char *str)
-{
-	char *in = str;
-	char *out = str;
-
-	while (*in) {
-		if (*in == '\\') {
-			if (in[1] == '\\') {
-				*out++ = '\\';
-				in += 2;
-				continue;
-			} else if (in[1] == 't') {
-				*out++ = '\t';
-				in += 2;
-				continue;
-			} else if (in[1] == 'n') {
-				*out++ = '\n';
-				in += 2;
-				continue;
-			} else if (isodigit(in[1]) &&
-				   isodigit(in[2]) &&
-				   isodigit(in[3])) {
-				*out++ = (((in[1] - '0') << 6) |
-					  ((in[2] - '0') << 3) |
-					  (in[3] - '0'));
-				in += 4;
-				continue;
-			}
-		}
-		*out++ = *in++;
-	}
-	*out = '\0';
-
-	return str;
-}
-
 static int check_set(const char **dest, char *src, char *name)
 {
 	int rc = 0;
@@ -370,8 +330,10 @@ static int ddebug_parse_query(char *words[], int nwords,
 		} else if (!strcmp(words[i], "module")) {
 			rc = check_set(&query->module, words[i+1], "module");
 		} else if (!strcmp(words[i], "format")) {
-			rc = check_set(&query->format, unescape(words[i+1]),
-				       "format");
+			string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
+							    UNESCAPE_OCTAL |
+							    UNESCAPE_SPECIAL);
+			rc = check_set(&query->format, words[i+1], "format");
 		} else if (!strcmp(words[i], "line")) {
 			char *first = words[i+1];
 			char *last = strchr(first, '-');
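
The flag set chosen here is a superset of what the removed unescape() handled
(backslash, \t, \n and three-digit octal), so existing queries keep working
while \r, \v, \f, \", \a and \e become available too. The effect on a query
word, as a sketch:

	char fmt[] = "hello\\040world\\n";   /* user typed: hello\040world\n */

	string_unescape_inplace(fmt, UNESCAPE_SPACE | UNESCAPE_OCTAL |
				     UNESCAPE_SPECIAL);
	/* fmt is now "hello world\n" - a real space and newline - so it can
	 * match a pr_debug() format containing embedded whitespace. */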
diff --git a/lib/kobject.c b/lib/kobject.c
index a65486613d79..b7e29a6056d3 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -529,7 +529,7 @@ struct kobject *kobject_get(struct kobject *kobj)
 	return kobj;
 }
 
-static struct kobject *kobject_get_unless_zero(struct kobject *kobj)
+static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
 {
 	if (!kref_get_unless_zero(&kobj->kref))
 		kobj = NULL;
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 8335d39d2ccd..4a83ecd03650 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -365,7 +365,13 @@ static int lc_unused_element_available(struct lru_cache *lc)
 	return 0;
 }
 
-static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool may_change)
+/* used as internal flags to __lc_get */
+enum {
+	LC_GET_MAY_CHANGE = 1,
+	LC_GET_MAY_USE_UNCOMMITTED = 2,
+};
+
+static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
 {
 	struct lc_element *e;
 
@@ -380,22 +386,31 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool
 	 * this enr is currently being pulled in already,
 	 * and will be available once the pending transaction
 	 * has been committed. */
-	if (e && e->lc_new_number == e->lc_number) {
+	if (e) {
+		if (e->lc_new_number != e->lc_number) {
+			/* It has been found above, but on the "to_be_changed"
+			 * list, not yet committed. Don't pull it in twice,
+			 * wait for the transaction, then try again...
+			 */
+			if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
+				RETURN(NULL);
+			/* ... unless the caller is aware of the implications,
+			 * probably preparing a cumulative transaction. */
+			++e->refcnt;
+			++lc->hits;
+			RETURN(e);
+		}
+		/* else: lc_new_number == lc_number; a real hit. */
 		++lc->hits;
 		if (e->refcnt++ == 0)
 			lc->used++;
 		list_move(&e->list, &lc->in_use); /* Not evictable... */
 		RETURN(e);
 	}
+	/* e == NULL */
 
 	++lc->misses;
-	if (!may_change)
-		RETURN(NULL);
-
-	/* It has been found above, but on the "to_be_changed" list, not yet
-	 * committed. Don't pull it in twice, wait for the transaction, then
-	 * try again */
-	if (e)
+	if (!(flags & LC_GET_MAY_CHANGE))
 		RETURN(NULL);
 
 	/* To avoid races with lc_try_lock(), first, mark us dirty
@@ -477,7 +492,27 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, bool
  */
 struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
 {
-	return __lc_get(lc, enr, 1);
+	return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
+}
+
+/**
+ * lc_get_cumulative - like lc_get; also finds to-be-changed elements
+ * @lc: the lru cache to operate on
+ * @enr: the label to look up
+ *
+ * Unlike lc_get this also returns the element for @enr, if it is belonging to
+ * a pending transaction, so the return values are like for lc_get(),
+ * plus:
+ *
+ * pointer to an element already on the "to_be_changed" list.
+ *	In this case, the cache was already marked %LC_DIRTY.
+ *
+ * Caller needs to make sure that the pending transaction is completed,
+ * before proceeding to actually use this element.
+ */
+struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
+{
+	return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
 }
 
 /**
@@ -648,3 +683,4 @@ EXPORT_SYMBOL(lc_seq_printf_stats);
 EXPORT_SYMBOL(lc_seq_dump_details);
 EXPORT_SYMBOL(lc_try_lock);
 EXPORT_SYMBOL(lc_is_used);
+EXPORT_SYMBOL(lc_get_cumulative);
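
A hedged sketch of how the one in-tree user of this API (DRBD) might call the
new helper while accumulating several label changes into one transaction;
wait_for_transaction_commit() and use_element() are invented placeholders:

	struct lc_element *e = lc_get_cumulative(lc, enr);

	if (e) {
		if (e->lc_new_number != e->lc_number)
			/* still on "to_be_changed"; cache is LC_DIRTY */
			wait_for_transaction_commit(lc);  /* placeholder */
		use_element(e);                           /* placeholder */
		lc_put(lc, e);
	}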
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 44b92cb6224f..eb4a04afea80 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -17,7 +17,7 @@ static int debugfs_errno_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
 			"%lld\n");
 
-static struct dentry *debugfs_create_errno(const char *name, mode_t mode,
+static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
 					   struct dentry *parent, int *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_errno);
@@ -50,7 +50,7 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
 			       struct notifier_err_inject *err_inject, int priority)
 {
 	struct notifier_err_inject_action *action;
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 	struct dentry *dir;
 	struct dentry *actions_dir;
 
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index d8de11f45908..318f382a010d 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -9,6 +9,7 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+#include <linux/module.h>
 #include <linux/export.h>
 #include <linux/oid_registry.h>
 #include <linux/kernel.h>
@@ -16,6 +17,10 @@
 #include <linux/bug.h>
 #include "oid_registry_data.c"
 
+MODULE_DESCRIPTION("OID Registry");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
 /**
  * look_up_OID - Find an OID registration for the specified data
  * @data: Binary representation of the OID
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index af38aedbd874..122f02f9941b 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -117,8 +117,7 @@ static int black_path_count(struct rb_node *rb)
 static void check(int nr_nodes)
 {
 	struct rb_node *rb;
-	int count = 0;
-	int blacks = 0;
+	int count = 0, blacks = 0;
 	u32 prev_key = 0;
 
 	for (rb = rb_first(&root); rb; rb = rb_next(rb)) {
@@ -134,7 +133,9 @@ static void check(int nr_nodes)
 		prev_key = node->key;
 		count++;
 	}
+
 	WARN_ON_ONCE(count != nr_nodes);
+	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1);
 }
 
 static void check_augmented(int nr_nodes)
@@ -148,7 +149,7 @@ static void check_augmented(int nr_nodes)
 	}
 }
 
-static int rbtree_test_init(void)
+static int __init rbtree_test_init(void)
 {
 	int i, j;
 	cycles_t time1, time2, time;
@@ -221,7 +222,7 @@ static int rbtree_test_init(void)
 	return -EAGAIN; /* Fail will directly unload the module */
 }
 
-static void rbtree_test_exit(void)
+static void __exit rbtree_test_exit(void)
 {
 	printk(KERN_ALERT "test exit\n");
 }
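
The new WARN_ON_ONCE encodes a standard red-black bound. black_path_count()
counts the black nodes on one root-to-node path, so for rb_last() it yields
the tree's black height bh, and a valid red-black tree of black height bh
holds at least 2^bh - 1 nodes (induction over subtrees: a subtree of black
height b contains at least 1 + 2*(2^(b-1) - 1) = 2^b - 1 nodes). The check
therefore asserts count >= 2^bh - 1 and fires only if rebalancing is broken.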
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 7542afbb22b3..9be8a9144978 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -9,12 +9,15 @@
 #include <linux/sched.h>
 #include <linux/export.h>
 
+enum rwsem_waiter_type {
+	RWSEM_WAITING_FOR_WRITE,
+	RWSEM_WAITING_FOR_READ
+};
+
 struct rwsem_waiter {
 	struct list_head list;
 	struct task_struct *task;
-	unsigned int flags;
-#define RWSEM_WAITING_FOR_READ	0x00000001
-#define RWSEM_WAITING_FOR_WRITE	0x00000002
+	enum rwsem_waiter_type type;
 };
 
 int rwsem_is_locked(struct rw_semaphore *sem)
@@ -67,26 +70,17 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
-	if (!wakewrite) {
-		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
-			goto out;
-		goto dont_wake_writers;
-	}
-
-	/*
-	 * as we support write lock stealing, we can't set sem->activity
-	 * to -1 here to indicate we get the lock. Instead, we wake it up
-	 * to let it go get it again.
-	 */
-	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
-		wake_up_process(waiter->task);
+	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
+		if (wakewrite)
+			/* Wake up a writer. Note that we do not grant it the
+			 * lock - it will have to acquire it when it runs. */
+			wake_up_process(waiter->task);
 		goto out;
 	}
 
 	/* grant an infinite number of read locks to the front of the queue */
- dont_wake_writers:
 	woken = 0;
-	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
+	do {
 		struct list_head *next = waiter->list.next;
 
 		list_del(&waiter->list);
@@ -96,10 +90,10 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 		wake_up_process(tsk);
 		put_task_struct(tsk);
 		woken++;
-		if (list_empty(&sem->wait_list))
+		if (next == &sem->wait_list)
 			break;
 		waiter = list_entry(next, struct rwsem_waiter, list);
-	}
+	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
 
 	sem->activity += woken;
 
@@ -144,7 +138,7 @@ void __sched __down_read(struct rw_semaphore *sem)
 
 	/* set up my own style of waitqueue */
 	waiter.task = tsk;
-	waiter.flags = RWSEM_WAITING_FOR_READ;
+	waiter.type = RWSEM_WAITING_FOR_READ;
 	get_task_struct(tsk);
 
 	list_add_tail(&waiter.list, &sem->wait_list);
@@ -201,7 +195,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 	/* set up my own style of waitqueue */
 	tsk = current;
 	waiter.task = tsk;
-	waiter.flags = RWSEM_WAITING_FOR_WRITE;
+	waiter.type = RWSEM_WAITING_FOR_WRITE;
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* wait for someone to release the lock */
diff --git a/lib/rwsem.c b/lib/rwsem.c
index ad5e0df16ab4..19c5fa95e0b4 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -4,6 +4,7 @@
  * Derived from arch/i386/kernel/semaphore.c
  *
  * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
+ * and Michel Lespinasse <walken@google.com>
  */
 #include <linux/rwsem.h>
 #include <linux/sched.h>
@@ -30,21 +31,22 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 
 EXPORT_SYMBOL(__init_rwsem);
 
+enum rwsem_waiter_type {
+	RWSEM_WAITING_FOR_WRITE,
+	RWSEM_WAITING_FOR_READ
+};
+
 struct rwsem_waiter {
 	struct list_head list;
 	struct task_struct *task;
-	unsigned int flags;
-#define RWSEM_WAITING_FOR_READ	0x00000001
-#define RWSEM_WAITING_FOR_WRITE	0x00000002
+	enum rwsem_waiter_type type;
 };
 
-/* Wake types for __rwsem_do_wake(). Note that RWSEM_WAKE_NO_ACTIVE and
- * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
- * since the rwsem value was observed.
- */
-#define RWSEM_WAKE_ANY  0 /* Wake whatever's at head of wait list */
-#define RWSEM_WAKE_NO_ACTIVE 1 /* rwsem was observed with no active thread */
-#define RWSEM_WAKE_READ_OWNED 2 /* rwsem was observed to be read owned */
+enum rwsem_wake_type {
+	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
+	RWSEM_WAKE_READERS,	/* Wake readers only */
+	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
+};
 
 /*
  * handle the lock release when processes blocked on it that can now run
@@ -57,46 +59,43 @@ struct rwsem_waiter {
  * - writers are only woken if downgrading is false
  */
 static struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
+__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
 {
 	struct rwsem_waiter *waiter;
 	struct task_struct *tsk;
 	struct list_head *next;
-	signed long woken, loop, adjustment;
+	long oldcount, woken, loop, adjustment;
 
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
-		goto readers_only;
-
-	if (wake_type == RWSEM_WAKE_READ_OWNED)
-		/* Another active reader was observed, so wakeup is not
-		 * likely to succeed. Save the atomic op.
-		 */
+	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
+		if (wake_type == RWSEM_WAKE_ANY)
+			/* Wake writer at the front of the queue, but do not
+			 * grant it the lock yet as we want other writers
+			 * to be able to steal it. Readers, on the other hand,
+			 * will block as they will notice the queued writer.
+			 */
+			wake_up_process(waiter->task);
 		goto out;
+	}
 
-	/* Wake up the writing waiter and let the task grab the sem: */
-	wake_up_process(waiter->task);
-	goto out;
-
- readers_only:
-	/* If we come here from up_xxxx(), another thread might have reached
-	 * rwsem_down_failed_common() before we acquired the spinlock and
-	 * woken up a waiter, making it now active. We prefer to check for
-	 * this first in order to not spend too much time with the spinlock
-	 * held if we're not going to be able to wake up readers in the end.
-	 *
-	 * Note that we do not need to update the rwsem count: any writer
-	 * trying to acquire rwsem will run rwsem_down_write_failed() due
-	 * to the waiting threads and block trying to acquire the spinlock.
-	 *
-	 * We use a dummy atomic update in order to acquire the cache line
-	 * exclusively since we expect to succeed and run the final rwsem
-	 * count adjustment pretty soon.
+	/* Writers might steal the lock before we grant it to the next reader.
+	 * We prefer to do the first reader grant before counting readers
+	 * so we can bail out early if a writer stole the lock.
 	 */
-	if (wake_type == RWSEM_WAKE_ANY &&
-	    rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
-		/* Someone grabbed the sem for write already */
-		goto out;
+	adjustment = 0;
+	if (wake_type != RWSEM_WAKE_READ_OWNED) {
+		adjustment = RWSEM_ACTIVE_READ_BIAS;
+ try_reader_grant:
+		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
+			/* A writer stole the lock. Undo our reader grant. */
+			if (rwsem_atomic_update(-adjustment, sem) &
+						RWSEM_ACTIVE_MASK)
+				goto out;
+			/* Last active locker left. Retry waking readers. */
+			goto try_reader_grant;
+		}
+	}
 
 	/* Grant an infinite number of read locks to the readers at the front
 	 * of the queue. Note we increment the 'active part' of the count by
@@ -112,17 +111,19 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 		waiter = list_entry(waiter->list.next,
 				    struct rwsem_waiter, list);
 
-	} while (waiter->flags & RWSEM_WAITING_FOR_READ);
+	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
 
-	adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
-	if (waiter->flags & RWSEM_WAITING_FOR_READ)
+	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
 		/* hit end of list above */
 		adjustment -= RWSEM_WAITING_BIAS;
 
-	rwsem_atomic_add(adjustment, sem);
+	if (adjustment)
+		rwsem_atomic_add(adjustment, sem);
 
 	next = sem->wait_list.next;
-	for (loop = woken; loop > 0; loop--) {
+	loop = woken;
+	do {
 		waiter = list_entry(next, struct rwsem_waiter, list);
 		next = waiter->list.next;
 		tsk = waiter->task;
@@ -130,7 +131,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 		waiter->task = NULL;
 		wake_up_process(tsk);
 		put_task_struct(tsk);
-	}
+	} while (--loop);
 
 	sem->wait_list.next = next;
 	next->prev = &sem->wait_list;
@@ -139,60 +140,21 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
 	return sem;
 }
 
-/* Try to get write sem, caller holds sem->wait_lock: */
-static int try_get_writer_sem(struct rw_semaphore *sem,
-					struct rwsem_waiter *waiter)
-{
-	struct rwsem_waiter *fwaiter;
-	long oldcount, adjustment;
-
-	/* only steal when first waiter is writing */
-	fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	if (!(fwaiter->flags & RWSEM_WAITING_FOR_WRITE))
-		return 0;
-
-	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
-	/* Only one waiter in the queue: */
-	if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
-		adjustment -= RWSEM_WAITING_BIAS;
-
-try_again_write:
-	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
-	if (!(oldcount & RWSEM_ACTIVE_MASK)) {
-		/* No active lock: */
-		struct task_struct *tsk = waiter->task;
-
-		list_del(&waiter->list);
-		smp_mb();
-		put_task_struct(tsk);
-		tsk->state = TASK_RUNNING;
-		return 1;
-	}
-	/* some one grabbed the sem already */
-	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
-		return 0;
-	goto try_again_write;
-}
-
 /*
- * wait for a lock to be granted
+ * wait for the read lock to be granted
  */
-static struct rw_semaphore __sched *
-rwsem_down_failed_common(struct rw_semaphore *sem,
-			 unsigned int flags, signed long adjustment)
+struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
+	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk = current;
-	signed long count;
-
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
-	raw_spin_lock_irq(&sem->wait_lock);
 	waiter.task = tsk;
-	waiter.flags = flags;
+	waiter.type = RWSEM_WAITING_FOR_READ;
 	get_task_struct(tsk);
 
+	raw_spin_lock_irq(&sem->wait_lock);
 	if (list_empty(&sem->wait_list))
 		adjustment += RWSEM_WAITING_BIAS;
 	list_add_tail(&waiter.list, &sem->wait_list);
@@ -200,35 +162,24 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 	/* we're now waiting on the lock, but no longer actively locking */
 	count = rwsem_atomic_update(adjustment, sem);
 
-	/* If there are no active locks, wake the front queued process(es) up.
+	/* If there are no active locks, wake the front queued process(es).
 	 *
-	 * Alternatively, if we're called from a failed down_write(), there
-	 * were already threads queued before us and there are no active
-	 * writers, the lock must be read owned; so we try to wake any read
-	 * locks that were queued ahead of us. */
-	if (count == RWSEM_WAITING_BIAS)
-		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
-	else if (count > RWSEM_WAITING_BIAS &&
-		 adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
-		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+	 * If there are no writers and we are first in the queue,
+	 * wake our own waiter to join the existing active readers !
+	 */
+	if (count == RWSEM_WAITING_BIAS ||
+	    (count > RWSEM_WAITING_BIAS &&
+	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
 
 	raw_spin_unlock_irq(&sem->wait_lock);
 
 	/* wait to be given the lock */
-	for (;;) {
+	while (true) {
+		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (!waiter.task)
 			break;
-
-		raw_spin_lock_irq(&sem->wait_lock);
-		/* Try to get the writer sem, may steal from the head writer: */
-		if (flags == RWSEM_WAITING_FOR_WRITE)
-			if (try_get_writer_sem(sem, &waiter)) {
-				raw_spin_unlock_irq(&sem->wait_lock);
-				return sem;
-			}
-		raw_spin_unlock_irq(&sem->wait_lock);
 		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 	}
 
 	tsk->state = TASK_RUNNING;
@@ -237,21 +188,64 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
 }
 
 /*
- * wait for the read lock to be granted
- */
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_READ,
-					-RWSEM_ACTIVE_READ_BIAS);
-}
-
-/*
- * wait for the write lock to be granted
+ * wait until we successfully acquire the write lock
  */
 struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
-	return rwsem_down_failed_common(sem, RWSEM_WAITING_FOR_WRITE,
-					-RWSEM_ACTIVE_WRITE_BIAS);
+	long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
+	struct rwsem_waiter waiter;
+	struct task_struct *tsk = current;
+
+	/* set up my own style of waitqueue */
+	waiter.task = tsk;
+	waiter.type = RWSEM_WAITING_FOR_WRITE;
+
+	raw_spin_lock_irq(&sem->wait_lock);
+	if (list_empty(&sem->wait_list))
+		adjustment += RWSEM_WAITING_BIAS;
+	list_add_tail(&waiter.list, &sem->wait_list);
+
+	/* we're now waiting on the lock, but no longer actively locking */
+	count = rwsem_atomic_update(adjustment, sem);
+
+	/* If there were already threads queued before us and there are no
+	 * active writers, the lock must be read owned; so we try to wake
+	 * any read locks that were queued ahead of us. */
+	if (count > RWSEM_WAITING_BIAS &&
+	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
+		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
+
+	/* wait until we successfully acquire the lock */
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+	while (true) {
+		if (!(count & RWSEM_ACTIVE_MASK)) {
+			/* Try acquiring the write lock. */
+			count = RWSEM_ACTIVE_WRITE_BIAS;
+			if (!list_is_singular(&sem->wait_list))
+				count += RWSEM_WAITING_BIAS;
+
+			if (sem->count == RWSEM_WAITING_BIAS &&
+			    cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
+							RWSEM_WAITING_BIAS)
+				break;
+		}
+
+		raw_spin_unlock_irq(&sem->wait_lock);
+
+		/* Block until there are no active lockers. */
+		do {
+			schedule();
+			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
+
+		raw_spin_lock_irq(&sem->wait_lock);
+	}
+
+	list_del(&waiter.list);
+	raw_spin_unlock_irq(&sem->wait_lock);
+	tsk->state = TASK_RUNNING;
+
+	return sem;
 }
 
 /*
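
The try_reader_grant path above optimistically charges one reader bias before
walking the queue, then backs it out if a writer got in first. A minimal
userspace sketch of that grant-then-check pattern with C11 atomics (constants
and the helper name are illustrative; unlike the kernel's
rwsem_atomic_update(), which returns the new count, the fetch ops below
return the old one):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* true: our reader grant stands; false: a writer holds the lock. */
	static bool try_reader_grant(_Atomic long *count, long reader_bias,
				     long waiting_bias, long active_mask)
	{
		for (;;) {
			long old = atomic_fetch_add(count, reader_bias);

			if (old >= waiting_bias)
				return true;	/* no writer: grant stands */

			/* A writer stole the lock; undo our grant. */
			if ((atomic_fetch_sub(count, reader_bias) - reader_bias)
			    & active_mask)
				return false;	/* someone is still active */

			/* Last active locker left meanwhile: retry. */
		}
	}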
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b83c144d731f..a1cf8cae60e7 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -401,7 +401,6 @@ void __sg_page_iter_start(struct sg_page_iter *piter,
 	piter->__pg_advance = 0;
 	piter->__nents = nents;
 
-	piter->page = NULL;
 	piter->sg = sglist;
 	piter->sg_pgoffset = pgoffset;
 }
@@ -426,7 +425,6 @@ bool __sg_page_iter_next(struct sg_page_iter *piter)
 		if (!--piter->__nents || !piter->sg)
 			return false;
 	}
-	piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
 
 	return true;
 }
@@ -496,7 +494,7 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
 		miter->__remaining = min_t(unsigned long, miter->__remaining,
 					   PAGE_SIZE - miter->__offset);
 	}
-	miter->page = miter->piter.page;
+	miter->page = sg_page_iter_page(&miter->piter);
 	miter->consumed = miter->length = miter->__remaining;
 
 	if (miter->__flags & SG_MITER_ATOMIC)
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 1cffc223bff5..ed5c1454dd62 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -2,10 +2,12 @@
  * Helpers for formatting and printing strings
  *
  * Copyright 31 August 2008 James Bottomley
+ * Copyright (C) 2013, Intel Corporation
  */
 #include <linux/kernel.h>
 #include <linux/math64.h>
 #include <linux/export.h>
+#include <linux/ctype.h>
 #include <linux/string_helpers.h>
 
 /**
@@ -66,3 +68,134 @@ int string_get_size(u64 size, const enum string_size_units units,
 	return 0;
 }
 EXPORT_SYMBOL(string_get_size);
+
+static bool unescape_space(char **src, char **dst)
+{
+	char *p = *dst, *q = *src;
+
+	switch (*q) {
+	case 'n':
+		*p = '\n';
+		break;
+	case 'r':
+		*p = '\r';
+		break;
+	case 't':
+		*p = '\t';
+		break;
+	case 'v':
+		*p = '\v';
+		break;
+	case 'f':
+		*p = '\f';
+		break;
+	default:
+		return false;
+	}
+	*dst += 1;
+	*src += 1;
+	return true;
+}
+
+static bool unescape_octal(char **src, char **dst)
+{
+	char *p = *dst, *q = *src;
+	u8 num;
+
+	if (isodigit(*q) == 0)
+		return false;
+
+	num = (*q++) & 7;
+	while (num < 32 && isodigit(*q) && (q - *src < 3)) {
+		num <<= 3;
+		num += (*q++) & 7;
+	}
+	*p = num;
+	*dst += 1;
+	*src = q;
+	return true;
+}
+
+static bool unescape_hex(char **src, char **dst)
+{
+	char *p = *dst, *q = *src;
+	int digit;
+	u8 num;
+
+	if (*q++ != 'x')
+		return false;
+
+	num = digit = hex_to_bin(*q++);
+	if (digit < 0)
+		return false;
+
+	digit = hex_to_bin(*q);
+	if (digit >= 0) {
+		q++;
+		num = (num << 4) | digit;
+	}
+	*p = num;
+	*dst += 1;
+	*src = q;
+	return true;
+}
+
+static bool unescape_special(char **src, char **dst)
+{
+	char *p = *dst, *q = *src;
+
+	switch (*q) {
+	case '\"':
+		*p = '\"';
+		break;
+	case '\\':
+		*p = '\\';
+		break;
+	case 'a':
+		*p = '\a';
+		break;
+	case 'e':
+		*p = '\e';
+		break;
+	default:
+		return false;
+	}
+	*dst += 1;
+	*src += 1;
+	return true;
+}
+
+int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
+{
+	char *out = dst;
+
+	while (*src && --size) {
+		if (src[0] == '\\' && src[1] != '\0' && size > 1) {
+			src++;
+			size--;
+
+			if (flags & UNESCAPE_SPACE &&
+					unescape_space(&src, &out))
+				continue;
+
+			if (flags & UNESCAPE_OCTAL &&
+					unescape_octal(&src, &out))
+				continue;
+
+			if (flags & UNESCAPE_HEX &&
+					unescape_hex(&src, &out))
+				continue;
+
+			if (flags & UNESCAPE_SPECIAL &&
+					unescape_special(&src, &out))
+				continue;
+
+			*out++ = '\\';
+		}
+		*out++ = *src++;
+	}
+	*out = '\0';
+
+	return out - dst;
+}
+EXPORT_SYMBOL(string_unescape);
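
As the loop above shows, string_unescape() stops once the destination budget
is down to the terminator, always writes a trailing NUL, copies unrecognized
or disabled escapes through verbatim, and returns the number of characters
written excluding the NUL. Usage sketch:

	char in[] = "\\x41\\x42\\101";	/* literally: \x41\x42\101 */
	char out[8];
	int len;

	len = string_unescape(in, out, sizeof(out),
			      UNESCAPE_HEX | UNESCAPE_OCTAL);
	/* out = "ABA", len = 3 */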
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
new file mode 100644
index 000000000000..6ac48de04c0e
--- /dev/null
+++ b/lib/test-string_helpers.c
@@ -0,0 +1,103 @@
+/*
+ * Test cases for lib/string_helpers.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+
+struct test_string {
+	const char *in;
+	const char *out;
+	unsigned int flags;
+};
+
+static const struct test_string strings[] __initconst = {
+	{
+		.in = "\\f\\ \\n\\r\\t\\v",
+		.out = "\f\\ \n\r\t\v",
+		.flags = UNESCAPE_SPACE,
+	},
+	{
+		.in = "\\40\\1\\387\\0064\\05\\040\\8a\\110\\777",
+		.out = " \001\00387\0064\005 \\8aH?7",
+		.flags = UNESCAPE_OCTAL,
+	},
+	{
+		.in = "\\xv\\xa\\x2c\\xD\\x6f2",
+		.out = "\\xv\n,\ro2",
+		.flags = UNESCAPE_HEX,
+	},
+	{
+		.in = "\\h\\\\\\\"\\a\\e\\",
+		.out = "\\h\\\"\a\e\\",
+		.flags = UNESCAPE_SPECIAL,
+	},
+};
+
+static void __init test_string_unescape(unsigned int flags, bool inplace)
+{
+	char in[256];
+	char out_test[256];
+	char out_real[256];
+	int i, p = 0, q_test = 0, q_real = sizeof(out_real);
+
+	for (i = 0; i < ARRAY_SIZE(strings); i++) {
+		const char *s = strings[i].in;
+		int len = strlen(strings[i].in);
+
+		/* Copy string to in buffer */
+		memcpy(&in[p], s, len);
+		p += len;
+
+		/* Copy expected result for given flags */
+		if (flags & strings[i].flags) {
+			s = strings[i].out;
+			len = strlen(strings[i].out);
+		}
+		memcpy(&out_test[q_test], s, len);
+		q_test += len;
+	}
+	in[p++] = '\0';
+
+	/* Call string_unescape and compare result */
+	if (inplace) {
+		memcpy(out_real, in, p);
+		if (flags == UNESCAPE_ANY)
+			q_real = string_unescape_any_inplace(out_real);
+		else
+			q_real = string_unescape_inplace(out_real, flags);
+	} else if (flags == UNESCAPE_ANY) {
+		q_real = string_unescape_any(in, out_real, q_real);
+	} else {
+		q_real = string_unescape(in, out_real, q_real, flags);
+	}
+
+	if (q_real != q_test || memcmp(out_test, out_real, q_test)) {
+		pr_warn("Test failed: flags = %u\n", flags);
+		print_hex_dump(KERN_WARNING, "Input: ",
+			       DUMP_PREFIX_NONE, 16, 1, in, p - 1, true);
+		print_hex_dump(KERN_WARNING, "Expected: ",
+			       DUMP_PREFIX_NONE, 16, 1, out_test, q_test, true);
+		print_hex_dump(KERN_WARNING, "Got: ",
+			       DUMP_PREFIX_NONE, 16, 1, out_real, q_real, true);
+	}
+}
+
+static int __init test_string_helpers_init(void)
+{
+	unsigned int i;
+
+	pr_info("Running tests...\n");
+	for (i = 0; i < UNESCAPE_ANY + 1; i++)
+		test_string_unescape(i, false);
+	test_string_unescape(get_random_int() % (UNESCAPE_ANY + 1), true);
+
+	return -EINVAL;
+}
+module_init(test_string_helpers_init);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 000000000000..4f5b1ddbcd25
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,9 @@
+#include <linux/export.h>
+#include <linux/bug.h>
+#include <linux/uaccess.h>
+
+void copy_from_user_overflow(void)
+{
+	WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_from_user_overflow);
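
copy_from_user_overflow() is the out-of-line landing pad for the
DEBUG_STRICT_USER_COPY_CHECKS machinery added to lib/Kconfig.debug above:
when gcc can prove the destination object size, a too-large length argument
is diverted here (or into a build failure in the strict case). A simplified
sketch of the arch-side wiring, modelled on the x86 pattern of this era:

	static inline unsigned long __must_check
	copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		int sz = __compiletime_object_size(to);	/* -1 if unprovable */

		if (likely(sz == -1 || sz >= n))
			n = _copy_from_user(to, from, n);
		else
			copy_from_user_overflow();	/* warn, or fail the build
							 * under the strict option */
		return n;
	}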
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0d62fd700f68..e149c6416384 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -534,14 +534,21 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
 
 static noinline_for_stack
 char *symbol_string(char *buf, char *end, void *ptr,
-		    struct printf_spec spec, char ext)
+		    struct printf_spec spec, const char *fmt)
 {
-	unsigned long value = (unsigned long) ptr;
+	unsigned long value;
 #ifdef CONFIG_KALLSYMS
 	char sym[KSYM_SYMBOL_LEN];
-	if (ext == 'B')
+#endif
+
+	if (fmt[1] == 'R')
+		ptr = __builtin_extract_return_addr(ptr);
+	value = (unsigned long)ptr;
+
+#ifdef CONFIG_KALLSYMS
+	if (*fmt == 'B')
 		sprint_backtrace(sym, value);
-	else if (ext != 'f' && ext != 's')
+	else if (*fmt != 'f' && *fmt != 's')
 		sprint_symbol(sym, value);
 	else
 		sprint_symbol_no_offset(sym, value);
@@ -987,6 +994,7 @@ int kptr_restrict __read_mostly;
  * - 'f' For simple symbolic function names without offset
  * - 'S' For symbolic direct pointers with offset
  * - 's' For symbolic direct pointers without offset
 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
 * - 'B' For backtraced symbolic direct pointers with offset
 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
@@ -1060,7 +1068,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 	case 'S':
 	case 's':
 	case 'B':
-		return symbol_string(buf, end, ptr, spec, *fmt);
+		return symbol_string(buf, end, ptr, spec, fmt);
 	case 'R':
 	case 'r':
 		return resource_string(buf, end, ptr, spec, fmt);
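
The new 'R' modifier exists for printing return addresses: on most
architectures __builtin_extract_return_addr() is the identity, but where the
ABI encodes extra state in the return address it recovers the real text
address before symbolization. A hedged usage sketch:

	printk(KERN_DEBUG "called from %pSR\n",
	       __builtin_return_address(0));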