Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug     |  17
-rw-r--r--  lib/Makefile          |   2
-rw-r--r--  lib/bitmap.c          |  28
-rw-r--r--  lib/bug.c             |  28
-rw-r--r--  lib/cmdline.c         |  57
-rw-r--r--  lib/iov_iter.c        |   8
-rw-r--r--  lib/kobject.c         |   5
-rw-r--r--  lib/refcount.c        | 169
-rw-r--r--  lib/sbitmap.c         |  75
-rw-r--r--  lib/string.c          |   2
-rw-r--r--  lib/test_user_copy.c  |   1
-rw-r--r--  lib/usercopy.c        |  26
-rw-r--r--  lib/vsprintf.c        |   6
 13 files changed, 335 insertions(+), 89 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 97d62c2da6c2..e2a617e09ab7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -130,7 +130,8 @@ config DYNAMIC_DEBUG
 	  nullarbor:~ # echo -n 'func svc_process -p' >
 				<debugfs>/dynamic_debug/control
 
-	  See Documentation/dynamic-debug-howto.txt for additional information.
+	  See Documentation/admin-guide/dynamic-debug-howto.rst for additional
+	  information.
 
 endmenu # "printk and dmesg options"
 
@@ -356,7 +357,7 @@ config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
 		(CRIS || M68K || FRV || UML || \
-		AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \
+		SUPERH || BLACKFIN || MN10300 || METAG) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
 	help
@@ -404,8 +405,8 @@ config MAGIC_SYSRQ
 	  by pressing various keys while holding SysRq (Alt+PrintScreen). It
 	  also works on a serial console (on PC hardware at least), if you
 	  send a BREAK and then within 5 seconds a command keypress. The
-	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
-	  unless you really know what this hack does.
+	  keys are documented in <file:Documentation/admin-guide/sysrq.rst>.
+	  Don't say Y unless you really know what this hack does.
 
 config MAGIC_SYSRQ_DEFAULT_ENABLE
 	hex "Enable magic SysRq key functions by default"
@@ -414,7 +415,7 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
 	help
 	  Specifies which SysRq key functions are enabled by default.
 	  This may be set to 1 or 0 to enable or disable them all, or
-	  to a bitmask as described in Documentation/sysrq.txt.
+	  to a bitmask as described in Documentation/admin-guide/sysrq.rst.
 
 config MAGIC_SYSRQ_SERIAL
 	bool "Enable magic SysRq key over serial"
@@ -1103,9 +1104,6 @@ config PROVE_LOCKING
 
 	 For more details, see Documentation/locking/lockdep-design.txt.
 
-config PROVE_LOCKING_SMALL
-	bool
-
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1112,9 @@ config LOCKDEP
 	select KALLSYMS
 	select KALLSYMS_ALL
 
+config LOCKDEP_SMALL
+	bool
+
 config LOCK_STAT
 	bool "Lock usage statistics"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index 320ac46a8725..b47cf97e1e68 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -41,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o
+	 once.o refcount.o usercopy.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 0b66f0e5eb6b..08c6ef3a2b6f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -502,11 +502,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
  * Syntax: range:used_size/group_size
  * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
  *
- * Returns 0 on success, -errno on invalid input strings.
- * Error values:
- *    %-EINVAL: second number in range smaller than first
- *    %-EINVAL: invalid character in string
- *    %-ERANGE: bit number specified too large for mask
+ * Returns: 0 on success, -errno on invalid input strings. Error values:
+ *
+ *   - ``-EINVAL``: second number in range smaller than first
+ *   - ``-EINVAL``: invalid character in string
+ *   - ``-ERANGE``: bit number specified too large for mask
  */
 static int __bitmap_parselist(const char *buf, unsigned int buflen,
 		int is_user, unsigned long *maskp,
@@ -864,14 +864,16 @@ EXPORT_SYMBOL(bitmap_bitremap);
  * 11 was set in @orig had no affect on @dst.
  *
  * Example [2] for bitmap_fold() + bitmap_onto():
- *  Let's say @relmap has these ten bits set:
+ *  Let's say @relmap has these ten bits set::
+ *
  *             40 41 42 43 45 48 53 61 74 95
+ *
  *  (for the curious, that's 40 plus the first ten terms of the
  *   Fibonacci sequence.)
  *
  *  Further lets say we use the following code, invoking
  *  bitmap_fold() then bitmap_onto, as suggested above to
- *  avoid the possibility of an empty @dst result:
+ *  avoid the possibility of an empty @dst result::
  *
  *	unsigned long *tmp;	// a temporary bitmap's bits
  *
@@ -882,22 +884,26 @@ EXPORT_SYMBOL(bitmap_bitremap);
  * various @orig's. I list the zero-based positions of each set bit.
  * The tmp column shows the intermediate result, as computed by
  * using bitmap_fold() to fold the @orig bitmap modulo ten
- * (the weight of @relmap).
+ * (the weight of @relmap):
  *
+ *  =============== ============== =================
  *  @orig           tmp            @dst
  *  0                0             40
  *  1                1             41
  *  9                9             95
- *  10               0             40 (*)
+ *  10               0             40 [#f1]_
  *  1 3 5 7          1 3 5 7       41 43 48 61
  *  0 1 2 3 4        0 1 2 3 4     40 41 42 43 45
  *  0 9 18 27        0 9 8 7       40 61 74 95
  *  0 10 20 30       0             40
  *  0 11 22 33       0 1 2 3       40 41 42 43
  *  0 12 24 36       0 2 4 6       40 42 45 53
- *  78 102 211       1 2 8         41 42 74 (*)
+ *  78 102 211       1 2 8         41 42 74 [#f1]_
+ *  =============== ============== =================
+ *
+ * .. [#f1]
  *
- * (*) For these marked lines, if we hadn't first done bitmap_fold()
+ *     For these marked lines, if we hadn't first done bitmap_fold()
  *     into tmp, then the @dst result would have been empty.
  *
  * If either of @orig or @relmap is empty (no set bits), then @dst
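
The fold/onto arithmetic above is easy to sanity-check outside the kernel. Below is a minimal userspace sketch, not the kernel API: the relmap positions come from the comment, a modulo-ten OR stands in for bitmap_fold(), and indexing relmap[n] stands in for bitmap_onto(). It reproduces the last row of the table.

    #include <stdio.h>

    /* The ten set-bit positions of @relmap from the example above. */
    static const int relmap[] = { 40, 41, 42, 43, 45, 48, 53, 61, 74, 95 };
    #define WEIGHT 10

    int main(void)
    {
        /* @orig = { 78, 102, 211 }: the last row of the table. */
        const int orig[] = { 78, 102, 211 };
        unsigned long tmp = 0;

        /* bitmap_fold(): OR each orig bit into position (bit % weight). */
        for (int i = 0; i < 3; i++)
            tmp |= 1UL << (orig[i] % WEIGHT);

        /* bitmap_onto(): the n-th set bit of tmp selects relmap[n]. */
        for (int n = 0; n < WEIGHT; n++)
            if (tmp & (1UL << n))
                printf("%d ", relmap[n]);
        printf("\n");   /* prints: 41 42 74 */
        return 0;
    }
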
diff --git a/lib/bug.c b/lib/bug.c
index 06edbbef0623..a6a1137d06db 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,7 +47,7 @@
 #include <linux/sched.h>
 #include <linux/rculist.h>
 
-extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
 
 static inline unsigned long bug_addr(const struct bug_entry *bug)
 {
@@ -62,10 +62,10 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 /* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
-static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	struct module *mod;
-	const struct bug_entry *bug = NULL;
+	struct bug_entry *bug = NULL;
 
 	rcu_read_lock_sched();
 	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
@@ -122,15 +122,15 @@ void module_bug_cleanup(struct module *mod)
 
 #else
 
-static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	return NULL;
 }
 #endif
 
-const struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 
 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
 		if (bugaddr == bug_addr(bug))
@@ -141,9 +141,9 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 
 enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 	const char *file;
-	unsigned line, warning;
+	unsigned line, warning, once, done;
 
 	if (!is_valid_bugaddr(bugaddr))
 		return BUG_TRAP_TYPE_NONE;
@@ -164,6 +164,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		line = bug->line;
 #endif
 		warning = (bug->flags & BUGFLAG_WARNING) != 0;
+		once = (bug->flags & BUGFLAG_ONCE) != 0;
+		done = (bug->flags & BUGFLAG_DONE) != 0;
+
+		if (warning && once) {
+			if (done)
+				return BUG_TRAP_TYPE_WARN;
+
+			/*
+			 * Since this is the only store, concurrency is not an issue.
+			 */
+			bug->flags |= BUGFLAG_DONE;
+		}
 	}
 
 	if (warning) {
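
The new BUGFLAG_ONCE/BUGFLAG_DONE handling is small enough to model in userspace. This is a hedged sketch of just the suppression logic; the flag values below are illustrative stand-ins, not the kernel's definitions:

    #include <stdio.h>

    #define BUGFLAG_WARNING (1 << 0)   /* illustrative values, not the kernel's */
    #define BUGFLAG_ONCE    (1 << 1)
    #define BUGFLAG_DONE    (1 << 2)

    struct bug_entry { unsigned flags; };

    /* Returns 1 if the warning should be reported, 0 if suppressed. */
    static int should_report(struct bug_entry *bug)
    {
        unsigned warning = (bug->flags & BUGFLAG_WARNING) != 0;
        unsigned once = (bug->flags & BUGFLAG_ONCE) != 0;
        unsigned done = (bug->flags & BUGFLAG_DONE) != 0;

        if (warning && once) {
            if (done)
                return 0;               /* already reported once: suppress */
            bug->flags |= BUGFLAG_DONE; /* single store, as in the patch */
        }
        return 1;
    }

    int main(void)
    {
        struct bug_entry warn_once = { BUGFLAG_WARNING | BUGFLAG_ONCE };

        for (int i = 0; i < 3; i++)
            printf("hit %d -> %s\n", i,
                   should_report(&warn_once) ? "report" : "suppressed");
        return 0;   /* prints: report, suppressed, suppressed */
    }
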
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf73c2ec..3c6432df7e63 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -15,6 +15,7 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/ctype.h>
 
 /*
  * If a hyphen was found in get_option, this will handle the
@@ -189,3 +190,59 @@ bool parse_option_str(const char *str, const char *option)
 
 	return false;
 }
+
+/*
+ * Parse a string to get a param value pair.
+ * You can use " around spaces, but can't escape ".
+ * Hyphens and underscores equivalent in parameter names.
+ */
+char *next_arg(char *args, char **param, char **val)
+{
+	unsigned int i, equals = 0;
+	int in_quote = 0, quoted = 0;
+	char *next;
+
+	if (*args == '"') {
+		args++;
+		in_quote = 1;
+		quoted = 1;
+	}
+
+	for (i = 0; args[i]; i++) {
+		if (isspace(args[i]) && !in_quote)
+			break;
+		if (equals == 0) {
+			if (args[i] == '=')
+				equals = i;
+		}
+		if (args[i] == '"')
+			in_quote = !in_quote;
+	}
+
+	*param = args;
+	if (!equals)
+		*val = NULL;
+	else {
+		args[equals] = '\0';
+		*val = args + equals + 1;
+
+		/* Don't include quotes in value. */
+		if (**val == '"') {
+			(*val)++;
+			if (args[i-1] == '"')
+				args[i-1] = '\0';
+		}
+	}
+	if (quoted && args[i-1] == '"')
+		args[i-1] = '\0';
+
+	if (args[i]) {
+		args[i] = '\0';
+		next = args + i + 1;
+	} else
+		next = args + i;
+
+	/* Chew up trailing spaces. */
+	return skip_spaces(next);
+	//return next;
+}
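
To see what next_arg() produces, it can be compiled in userspace alongside a copy of the function above, with the kernel includes swapped for <ctype.h> and a local skip_spaces(). A hedged harness; the command line is invented for illustration:

    #include <ctype.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's skip_spaces(). */
    char *skip_spaces(const char *str)
    {
        while (isspace((unsigned char)*str))
            ++str;
        return (char *)str;
    }

    char *next_arg(char *args, char **param, char **val); /* from the patch */

    int main(void)
    {
        char cmdline[] = "console=ttyS0,115200 root=\"/dev/sda1\" quiet";
        char *args = cmdline, *param, *val;

        while (*args) {
            args = next_arg(args, &param, &val);
            printf("param=%s val=%s\n", param, val ? val : "(none)");
        }
        /* Expected output:
         *   param=console val=ttyS0,115200
         *   param=root val=/dev/sda1
         *   param=quiet val=(none)
         */
        return 0;
    }
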
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 60abc44385b7..4952311422c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
 		   size_t count)
 {
 	/* It will get better. Eventually... */
-	if (segment_eq(get_fs(), KERNEL_DS)) {
+	if (uaccess_kernel()) {
 		direction |= ITER_KVEC;
 		i->type = direction;
 		i->kvec = (struct kvec *)iov;
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 		return 0;
 	}
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					     v.iov_base, v.iov_len))
 			return false;
 		0;}),
@@ -798,7 +798,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
 		while (1) {
 			size_t n = off - pipe->bufs[idx].offset;
 			if (unroll < n) {
-				off -= (n - unroll);
+				off -= unroll;
 				break;
 			}
 			unroll -= n;
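
The one-line iov_iter_revert() fix is pure arithmetic: off is an absolute offset inside the current pipe buffer, and n = off - buf->offset is how much of that buffer was consumed, so reverting unroll bytes (with unroll < n) must move off back by exactly unroll, not by n - unroll. A small sketch with invented numbers:

    #include <assert.h>

    int main(void)
    {
        unsigned buf_offset = 100;  /* illustrative values only */
        unsigned off = 150;         /* 50 bytes consumed in this buffer */
        unsigned unroll = 20;       /* bytes to un-consume */
        unsigned n = off - buf_offset;

        assert(unroll < n);
        off -= unroll;              /* the patched line */
        assert(off == 130);         /* old code computed off -= (n - unroll),
                                     * i.e. 120: it reverted 30 bytes, not 20 */
        return 0;
    }
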
diff --git a/lib/kobject.c b/lib/kobject.c
index 445dcaeb0f56..763d70a18941 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -601,12 +601,15 @@ struct kobject *kobject_get(struct kobject *kobj)
 }
 EXPORT_SYMBOL(kobject_get);
 
-static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
 {
+	if (!kobj)
+		return NULL;
 	if (!kref_get_unless_zero(&kobj->kref))
 		kobj = NULL;
 	return kobj;
 }
+EXPORT_SYMBOL(kobject_get_unless_zero);
 
 /*
  * kobject_cleanup - free kobject resources.
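
With the NULL check added, kobject_get_unless_zero() is also safe on a NULL pointer, and the export makes it usable from modules for the classic lookup-under-lock pattern: during a list walk, take a reference only if the object has not already started dying. A hedged kernel-style sketch; my_obj, obj_list and obj_lock are invented for illustration, not part of the patch:

    #include <linux/kobject.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_obj {
        struct kobject kobj;
        struct list_head node;
        int id;
    };

    static LIST_HEAD(obj_list);
    static DEFINE_SPINLOCK(obj_lock);

    static struct my_obj *my_obj_find(int id)
    {
        struct my_obj *obj, *found = NULL;

        spin_lock(&obj_lock);
        list_for_each_entry(obj, &obj_list, node) {
            if (obj->id != id)
                continue;
            /* Fails (returns NULL) once the final kobject_put() has
             * dropped the count to zero, so a dying object is never
             * handed out. */
            if (kobject_get_unless_zero(&obj->kobj))
                found = obj;
            break;
        }
        spin_unlock(&obj_lock);
        return found;
    }
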
diff --git a/lib/refcount.c b/lib/refcount.c
index aa09ad3c30b0..f42124ccf295 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,11 +37,29 @@
 #include <linux/refcount.h>
 #include <linux/bug.h>
 
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
 bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (!val)
 			return false;
 
@@ -51,12 +69,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		new = val + i;
 		if (new < val)
 			new = UINT_MAX;
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
 
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
 	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -64,24 +78,45 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_add_not_zero);
 
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
 void refcount_add(unsigned int i, refcount_t *r)
 {
 	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_add);
 
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller has guaranteed the
  * object memory to be stable (RCU, etc.). It does provide a control dependency
  * and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
  */
 bool refcount_inc_not_zero(refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		new = val + 1;
 
 		if (!val)
@@ -90,12 +125,7 @@ bool refcount_inc_not_zero(refcount_t *r)
 		if (unlikely(!new))
 			return true;
 
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
 	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -103,11 +133,17 @@ bool refcount_inc_not_zero(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
 
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
  */
 void refcount_inc(refcount_t *r)
 {
@@ -115,11 +151,31 @@ void refcount_inc(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc);
 
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (unlikely(val == UINT_MAX))
 			return false;
 
@@ -129,24 +185,24 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 			return false;
 		}
 
-		old = atomic_cmpxchg_release(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
 	return !new;
 }
 EXPORT_SYMBOL_GPL(refcount_sub_and_test);
 
-/*
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
  * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_and_test(refcount_t *r)
 {
@@ -154,21 +210,26 @@ bool refcount_dec_and_test(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_test);
 
-/*
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
  * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
  * when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-
 void refcount_dec(refcount_t *r)
 {
 	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_dec);
 
-/*
+/**
+ * refcount_dec_if_one - decrement a refcount if it is 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
  * success thereof.
  *
@@ -178,24 +239,33 @@ EXPORT_SYMBOL_GPL(refcount_dec);
  * It can be used like a try-delete operator; this explicit case is provided
  * and not cmpxchg in generic, because that would allow implementing unsafe
  * operations.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_if_one(refcount_t *r)
 {
-	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+	int val = 1;
+
+	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
 }
 EXPORT_SYMBOL_GPL(refcount_dec_if_one);
 
-/*
+/**
+ * refcount_dec_not_one - decrement a refcount if it is not 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it decrements unless the value is 1, in which case
  * it will return false.
  *
  * Was often done like: atomic_add_unless(&var, -1, 1)
+ *
+ * Return: true if the decrement operation was successful, false otherwise
  */
 bool refcount_dec_not_one(refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (unlikely(val == UINT_MAX))
 			return true;
 
208 return true; 278 return true;
209 } 279 }
210 280
211 old = atomic_cmpxchg_release(&r->refs, val, new); 281 } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
212 if (old == val)
213 break;
214
215 val = old;
216 }
217 282
218 return true; 283 return true;
219} 284}
220EXPORT_SYMBOL_GPL(refcount_dec_not_one); 285EXPORT_SYMBOL_GPL(refcount_dec_not_one);
221 286
222/* 287/**
288 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
289 * refcount to 0
290 * @r: the refcount
291 * @lock: the mutex to be locked
292 *
223 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail 293 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
224 * to decrement when saturated at UINT_MAX. 294 * to decrement when saturated at UINT_MAX.
225 * 295 *
226 * Provides release memory ordering, such that prior loads and stores are done 296 * Provides release memory ordering, such that prior loads and stores are done
227 * before, and provides a control dependency such that free() must come after. 297 * before, and provides a control dependency such that free() must come after.
228 * See the comment on top. 298 * See the comment on top.
299 *
300 * Return: true and hold mutex if able to decrement refcount to 0, false
301 * otherwise
229 */ 302 */
230bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) 303bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
231{ 304{
@@ -242,13 +315,21 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
 
-/*
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ *                         refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
  * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ *         otherwise
  */
 bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 {
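
All of the loop conversions in this file follow one pattern: atomic_try_cmpxchg_*() writes the observed value back into val on failure, so the old/val bookkeeping and the open-coded for (;;) loop collapse into a do { } while (!try_cmpxchg) loop. C11's atomic_compare_exchange_weak() has the same contract, so the shape can be modelled in userspace. A hedged sketch of refcount_add_not_zero() only; the saturation check that sits between the two hunks above is folded in, and this is a model, not the kernel code:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool add_not_zero_model(unsigned int i, atomic_uint *refs)
    {
        unsigned int new, val = atomic_load_explicit(refs, memory_order_relaxed);

        do {
            if (!val)
                return false;       /* already hit zero: refuse */
            if (val == UINT_MAX)
                return true;        /* saturated: stay saturated */

            new = val + i;
            if (new < val)
                new = UINT_MAX;     /* overflow: saturate */

            /* On failure, 'val' is reloaded with the current value,
             * exactly like atomic_try_cmpxchg_relaxed() above. */
        } while (!atomic_compare_exchange_weak_explicit(refs, &val, new,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));
        return true;
    }

    int main(void)
    {
        atomic_uint refs = 1;

        printf("%d\n", add_not_zero_model(1, &refs));   /* 1: refs is now 2 */
        atomic_store(&refs, 0);
        printf("%d\n", add_not_zero_model(1, &refs));   /* 0: refused on zero */
        return 0;
    }
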
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 60e800e0b5a0..80aa8d5463fa 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -79,15 +79,15 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
 }
 EXPORT_SYMBOL_GPL(sbitmap_resize);
 
-static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
-			      bool wrap)
+static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
+			      unsigned int hint, bool wrap)
 {
 	unsigned int orig_hint = hint;
 	int nr;
 
 	while (1) {
-		nr = find_next_zero_bit(&word->word, word->depth, hint);
-		if (unlikely(nr >= word->depth)) {
+		nr = find_next_zero_bit(word, depth, hint);
+		if (unlikely(nr >= depth)) {
 			/*
 			 * We started with an offset, and we didn't reset the
 			 * offset to 0 in a failure case, so start from 0 to
@@ -100,11 +100,11 @@ static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
 			return -1;
 		}
 
-		if (!test_and_set_bit(nr, &word->word))
+		if (!test_and_set_bit(nr, word))
 			break;
 
 		hint = nr + 1;
-		if (hint >= word->depth - 1)
+		if (hint >= depth - 1)
 			hint = 0;
 	}
 
@@ -119,7 +119,8 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 	index = SB_NR_TO_INDEX(sb, alloc_hint);
 
 	for (i = 0; i < sb->map_nr; i++) {
-		nr = __sbitmap_get_word(&sb->map[index],
+		nr = __sbitmap_get_word(&sb->map[index].word,
+					sb->map[index].depth,
 					SB_NR_TO_BIT(sb, alloc_hint),
 					!round_robin);
 		if (nr != -1) {
@@ -141,6 +142,37 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
 }
 EXPORT_SYMBOL_GPL(sbitmap_get);
 
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+			unsigned long shallow_depth)
+{
+	unsigned int i, index;
+	int nr = -1;
+
+	index = SB_NR_TO_INDEX(sb, alloc_hint);
+
+	for (i = 0; i < sb->map_nr; i++) {
+		nr = __sbitmap_get_word(&sb->map[index].word,
+					min(sb->map[index].depth, shallow_depth),
+					SB_NR_TO_BIT(sb, alloc_hint), true);
+		if (nr != -1) {
+			nr += index << sb->shift;
+			break;
+		}
+
+		/* Jump to next index. */
+		index++;
+		alloc_hint = index << sb->shift;
+
+		if (index >= sb->map_nr) {
+			index = 0;
+			alloc_hint = 0;
+		}
+	}
+
+	return nr;
+}
+EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
+
 bool sbitmap_any_bit_set(const struct sbitmap *sb)
 {
 	unsigned int i;
@@ -342,6 +374,35 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 }
 EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
 
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+				unsigned int shallow_depth)
+{
+	unsigned int hint, depth;
+	int nr;
+
+	hint = this_cpu_read(*sbq->alloc_hint);
+	depth = READ_ONCE(sbq->sb.depth);
+	if (unlikely(hint >= depth)) {
+		hint = depth ? prandom_u32() % depth : 0;
+		this_cpu_write(*sbq->alloc_hint, hint);
+	}
+	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
+
+	if (nr == -1) {
+		/* If the map is full, a hint won't do us much good. */
+		this_cpu_write(*sbq->alloc_hint, 0);
+	} else if (nr == hint || unlikely(sbq->round_robin)) {
+		/* Only update the hint if we used it. */
+		hint = nr + 1;
+		if (hint >= depth - 1)
+			hint = 0;
+		this_cpu_write(*sbq->alloc_hint, hint);
+	}
+
+	return nr;
+}
+EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
+
 static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 {
 	int i, wake_index;
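
The "shallow" variants change exactly one thing: the per-word search depth is clamped to min(depth, shallow_depth), confining allocations to the low bits of each word. A hedged single-word userspace model of that clamp, with invented numbers:

    #include <stdio.h>

    static int get_bit_shallow(unsigned long long *word, unsigned depth,
                               unsigned shallow_depth)
    {
        unsigned limit = depth < shallow_depth ? depth : shallow_depth;

        for (unsigned nr = 0; nr < limit; nr++) {
            if (!(*word & (1ULL << nr))) {
                *word |= 1ULL << nr;    /* test_and_set_bit() stand-in */
                return nr;
            }
        }
        return -1;  /* the shallow region is exhausted */
    }

    int main(void)
    {
        unsigned long long word = 0;

        /* With shallow_depth == 2, only bits 0 and 1 are ever handed out. */
        for (int i = 0; i < 4; i++)
            printf("%d ", get_bit_shallow(&word, 64, 2));
        printf("\n");   /* prints: 0 1 -1 -1 */
        return 0;
    }
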
diff --git a/lib/string.c b/lib/string.c
index ed83562a53ae..b5c9a1168d3a 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -131,7 +131,7 @@ EXPORT_SYMBOL(strncpy);
  * @src: Where to copy the string from
  * @size: size of destination buffer
  *
- * Compatible with *BSD: the result is always a valid
+ * Compatible with ``*BSD``: the result is always a valid
  * NUL-terminated string that fits in the buffer (unless,
  * of course, the buffer size is zero). It does not pad
  * out the result like strncpy() does.
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 1a8d71a68531..4621db801b23 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,7 +31,6 @@
  * their capability at compile-time, we just have to opt-out certain archs.
  */
 #if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
-			    !defined(CONFIG_AVR32) &&		\
 			    !defined(CONFIG_BLACKFIN) &&	\
 			    !defined(CONFIG_M32R) &&		\
 			    !defined(CONFIG_M68K) &&		\
diff --git a/lib/usercopy.c b/lib/usercopy.c
new file mode 100644
index 000000000000..1b6010a3beb8
--- /dev/null
+++ b/lib/usercopy.c
@@ -0,0 +1,26 @@
+#include <linux/uaccess.h>
+
+/* out-of-line parts */
+
+#ifndef INLINE_COPY_FROM_USER
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned long res = n;
+	if (likely(access_ok(VERIFY_READ, from, n)))
+		res = raw_copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
+}
+EXPORT_SYMBOL(_copy_from_user);
+#endif
+
+#ifndef INLINE_COPY_TO_USER
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (likely(access_ok(VERIFY_WRITE, to, n)))
+		n = raw_copy_to_user(to, from, n);
+	return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+#endif
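
The new out-of-line _copy_from_user() keeps an important invariant: on a partial copy it zero-fills the uncopied tail of the kernel buffer, so a caller that ignores the return value can never observe stale kernel memory. A hedged userspace model of that path, where fake_copy() stands in for raw_copy_from_user() and, like it, returns the number of bytes NOT copied:

    #include <stdio.h>
    #include <string.h>

    /* Pretend the "user" source faults after 'ok' bytes, like a partial copy. */
    static unsigned long fake_copy(void *to, const void *from, unsigned long n,
                                   unsigned long ok)
    {
        unsigned long copied = n < ok ? n : ok;

        memcpy(to, from, copied);
        return n - copied;  /* bytes not copied */
    }

    int main(void)
    {
        char src[8] = "secret!";
        char buf[8];

        memset(buf, 0xAA, sizeof(buf));     /* stale "kernel" contents */

        unsigned long n = sizeof(buf);
        unsigned long res = fake_copy(buf, src, n, 3);  /* fault after 3 bytes */

        if (res)
            memset(buf + (n - res), 0, res);    /* the patch's zero-padding */

        for (unsigned i = 0; i < n; i++)
            printf("%02x ", (unsigned char)buf[i]);
        printf("\n");   /* 73 65 63 00 00 00 00 00: no 0xAA leaks through */
        return 0;
    }
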
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index e3bf4e0f10b5..176641cc549d 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1954,13 +1954,13 @@ set_precision(struct printf_spec *spec, int prec)
  * This function generally follows C99 vsnprintf, but has some
  * extensions and a few limitations:
  *
- *  %n is unsupported
- *  %p* is handled by pointer()
+ *  - ``%n`` is unsupported
+ *  - ``%p*`` is handled by pointer()
  *
  * See pointer() or Documentation/printk-formats.txt for more
  * extensive description.
  *
- * ** Please update the documentation in both places when making changes **
+ * **Please update the documentation in both places when making changes**
  *
  * The return value is the number of characters which would
  * be generated for the given input, excluding the trailing