Diffstat (limited to 'lib')
 lib/Kconfig.debug    |  17
 lib/Makefile         |   4
 lib/bitmap.c         |  28
 lib/bug.c            |  28
 lib/cmdline.c        |  57
 lib/iov_iter.c       |  69
 lib/kobject.c        |   5
 lib/md5.c            |  95
 lib/nlattr.c         |  28
 lib/refcount.c       | 169
 lib/rhashtable.c     |  33
 lib/sbitmap.c        |  75
 lib/string.c         |   2
 lib/syscall.c        |   1
 lib/test_bpf.c       | 150
 lib/test_kasan.c     |  10
 lib/test_user_copy.c |   1
 lib/usercopy.c       |  26
 lib/vsprintf.c       |   6
19 files changed, 586 insertions, 218 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 97d62c2da6c2..e2a617e09ab7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -130,7 +130,8 @@ config DYNAMIC_DEBUG
 	  nullarbor:~ # echo -n 'func svc_process -p' >
 				<debugfs>/dynamic_debug/control
 
-	  See Documentation/dynamic-debug-howto.txt for additional information.
+	  See Documentation/admin-guide/dynamic-debug-howto.rst for additional
+	  information.
 
 endmenu # "printk and dmesg options"
 
@@ -356,7 +357,7 @@ config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
 		(CRIS || M68K || FRV || UML || \
-		 AVR32 || SUPERH || BLACKFIN || MN10300 || METAG) || \
+		 SUPERH || BLACKFIN || MN10300 || METAG) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
 	help
@@ -404,8 +405,8 @@ config MAGIC_SYSRQ
 	  by pressing various keys while holding SysRq (Alt+PrintScreen). It
 	  also works on a serial console (on PC hardware at least), if you
 	  send a BREAK and then within 5 seconds a command keypress. The
-	  keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
-	  unless you really know what this hack does.
+	  keys are documented in <file:Documentation/admin-guide/sysrq.rst>.
+	  Don't say Y unless you really know what this hack does.
 
 config MAGIC_SYSRQ_DEFAULT_ENABLE
 	hex "Enable magic SysRq key functions by default"
@@ -414,7 +415,7 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
 	help
 	  Specifies which SysRq key functions are enabled by default.
 	  This may be set to 1 or 0 to enable or disable them all, or
-	  to a bitmask as described in Documentation/sysrq.txt.
+	  to a bitmask as described in Documentation/admin-guide/sysrq.rst.
 
 config MAGIC_SYSRQ_SERIAL
 	bool "Enable magic SysRq key over serial"
@@ -1103,9 +1104,6 @@ config PROVE_LOCKING
 
 	 For more details, see Documentation/locking/lockdep-design.txt.
 
-config PROVE_LOCKING_SMALL
-	bool
-
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -1114,6 +1112,9 @@ config LOCKDEP
 	select KALLSYMS
 	select KALLSYMS_ALL
 
+config LOCKDEP_SMALL
+	bool
+
 config LOCK_STAT
 	bool "Lock usage statistics"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index 320ac46a8725..a155c73e3437 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o \
-	 sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
+	 sha1.o chacha20.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o siphash.o \
@@ -41,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o
+	 once.o refcount.o usercopy.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 0b66f0e5eb6b..08c6ef3a2b6f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -502,11 +502,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
  * Syntax: range:used_size/group_size
  * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
  *
- * Returns 0 on success, -errno on invalid input strings.
- * Error values:
- *    %-EINVAL: second number in range smaller than first
- *    %-EINVAL: invalid character in string
- *    %-ERANGE: bit number specified too large for mask
+ * Returns: 0 on success, -errno on invalid input strings. Error values:
+ *
+ *   - ``-EINVAL``: second number in range smaller than first
+ *   - ``-EINVAL``: invalid character in string
+ *   - ``-ERANGE``: bit number specified too large for mask
  */
 static int __bitmap_parselist(const char *buf, unsigned int buflen,
 		int is_user, unsigned long *maskp,
@@ -864,14 +864,16 @@ EXPORT_SYMBOL(bitmap_bitremap);
  *	11 was set in @orig had no affect on @dst.
  *
  * Example [2] for bitmap_fold() + bitmap_onto():
- *  Let's say @relmap has these ten bits set:
+ *  Let's say @relmap has these ten bits set::
+ *
  *		40 41 42 43 45 48 53 61 74 95
+ *
  *  (for the curious, that's 40 plus the first ten terms of the
  *  Fibonacci sequence.)
  *
  *  Further lets say we use the following code, invoking
  *  bitmap_fold() then bitmap_onto, as suggested above to
- *  avoid the possibility of an empty @dst result:
+ *  avoid the possibility of an empty @dst result::
  *
  *	unsigned long *tmp;	// a temporary bitmap's bits
  *
@@ -882,22 +884,26 @@ EXPORT_SYMBOL(bitmap_bitremap);
  * various @orig's.  I list the zero-based positions of each set bit.
  * The tmp column shows the intermediate result, as computed by
  * using bitmap_fold() to fold the @orig bitmap modulo ten
- * (the weight of @relmap).
+ * (the weight of @relmap):
  *
+ *  =============== ============== =================
  *      @orig           tmp            @dst
  *      0                0             40
  *      1                1             41
  *      9                9             95
- *      10               0             40 (*)
+ *      10               0             40 [#f1]_
  *      1 3 5 7          1 3 5 7       41 43 48 61
  *      0 1 2 3 4        0 1 2 3 4     40 41 42 43 45
  *      0 9 18 27        0 9 8 7       40 61 74 95
  *      0 10 20 30       0             40
  *      0 11 22 33       0 1 2 3       40 41 42 43
  *      0 12 24 36       0 2 4 6       40 42 45 53
- *      78 102 211       1 2 8         41 42 74 (*)
+ *      78 102 211       1 2 8         41 42 74 [#f1]_
+ *  =============== ============== =================
+ *
+ * .. [#f1]
  *
- * (*) For these marked lines, if we hadn't first done bitmap_fold()
+ *     For these marked lines, if we hadn't first done bitmap_fold()
  *     into tmp, then the @dst result would have been empty.
  *
  * If either of @orig or @relmap is empty (no set bits), then @dst
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,7 +47,7 @@
 #include <linux/sched.h>
 #include <linux/rculist.h>
 
-extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
 
 static inline unsigned long bug_addr(const struct bug_entry *bug)
 {
@@ -62,10 +62,10 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 /* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
-static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	struct module *mod;
-	const struct bug_entry *bug = NULL;
+	struct bug_entry *bug = NULL;
 
 	rcu_read_lock_sched();
 	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
@@ -122,15 +122,15 @@ void module_bug_cleanup(struct module *mod)
 
 #else
 
-static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
 	return NULL;
 }
 #endif
 
-const struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 
 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
 		if (bugaddr == bug_addr(bug))
@@ -141,9 +141,9 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 
 enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 {
-	const struct bug_entry *bug;
+	struct bug_entry *bug;
 	const char *file;
-	unsigned line, warning;
+	unsigned line, warning, once, done;
 
 	if (!is_valid_bugaddr(bugaddr))
 		return BUG_TRAP_TYPE_NONE;
@@ -164,6 +164,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 			line = bug->line;
 #endif
 		warning = (bug->flags & BUGFLAG_WARNING) != 0;
+		once = (bug->flags & BUGFLAG_ONCE) != 0;
+		done = (bug->flags & BUGFLAG_DONE) != 0;
+
+		if (warning && once) {
+			if (done)
+				return BUG_TRAP_TYPE_WARN;
+
+			/*
+			 * Since this is the only store, concurrency is not an issue.
+			 */
+			bug->flags |= BUGFLAG_DONE;
+		}
 	}
 
 	if (warning) {
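
The BUGFLAG_ONCE/BUGFLAG_DONE handling added above is what lets WARN_ON_ONCE() and friends be driven from the bug table instead of a per-call-site static variable: the first trap through a _ONCE entry sets BUGFLAG_DONE, and every later trap on the same bug_entry returns BUG_TRAP_TYPE_WARN without printing. A minimal sketch of that latch, with illustrative names (FLAG_ONCE, FLAG_DONE and should_report are not the kernel's):

	#include <stdbool.h>

	#define FLAG_ONCE 0x1	/* site asked to warn only once */
	#define FLAG_DONE 0x2	/* warning already emitted */

	struct entry {
		unsigned int flags;
	};

	static bool should_report(struct entry *e)
	{
		if (!(e->flags & FLAG_ONCE))
			return true;	/* ordinary site: always report */
		if (e->flags & FLAG_DONE)
			return false;	/* already fired: suppress */
		/* Single store; a rare duplicate report under a race is harmless. */
		e->flags |= FLAG_DONE;
		return true;
	}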
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf73c2ec..3c6432df7e63 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -15,6 +15,7 @@
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/ctype.h>
 
 /*
  * If a hyphen was found in get_option, this will handle the
@@ -189,3 +190,59 @@ bool parse_option_str(const char *str, const char *option)
 
 	return false;
 }
+
+/*
+ * Parse a string to get a param value pair.
+ * You can use " around spaces, but can't escape ".
+ * Hyphens and underscores equivalent in parameter names.
+ */
+char *next_arg(char *args, char **param, char **val)
+{
+	unsigned int i, equals = 0;
+	int in_quote = 0, quoted = 0;
+	char *next;
+
+	if (*args == '"') {
+		args++;
+		in_quote = 1;
+		quoted = 1;
+	}
+
+	for (i = 0; args[i]; i++) {
+		if (isspace(args[i]) && !in_quote)
+			break;
+		if (equals == 0) {
+			if (args[i] == '=')
+				equals = i;
+		}
+		if (args[i] == '"')
+			in_quote = !in_quote;
+	}
+
+	*param = args;
+	if (!equals)
+		*val = NULL;
+	else {
+		args[equals] = '\0';
+		*val = args + equals + 1;
+
+		/* Don't include quotes in value. */
+		if (**val == '"') {
+			(*val)++;
+			if (args[i-1] == '"')
+				args[i-1] = '\0';
+		}
+	}
+	if (quoted && args[i-1] == '"')
+		args[i-1] = '\0';
+
+	if (args[i]) {
+		args[i] = '\0';
+		next = args + i + 1;
+	} else
+		next = args + i;
+
+	/* Chew up trailing spaces. */
+	return skip_spaces(next);
+	//return next;
+}
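
next_arg() consumes one name[=value] token per call, NUL-terminates it in place, and returns a pointer to the rest of the string. A hedged usage sketch; the caller and the example command line are illustrative:

	/* Illustrative caller; next_arg() modifies the buffer in place,
	 * so 'cmdline' must be writable. */
	static void parse_example(char *cmdline)
	{
		char *param, *val;

		cmdline = skip_spaces(cmdline);	/* next_arg() expects no leading spaces */
		while (*cmdline) {
			cmdline = next_arg(cmdline, &param, &val);
			/* For 'console=ttyS0,115200 quiet' this yields:
			 *   1st call: param="console", val="ttyS0,115200"
			 *   2nd call: param="quiet",   val=NULL
			 */
			pr_info("param=%s val=%s\n", param, val ? val : "(none)");
		}
	}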
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index e68604ae3ced..4952311422c1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -413,7 +413,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
 			size_t count)
 {
 	/* It will get better. Eventually... */
-	if (segment_eq(get_fs(), KERNEL_DS)) {
+	if (uaccess_kernel()) {
 		direction |= ITER_KVEC;
 		i->type = direction;
 		i->kvec = (struct kvec *)iov;
@@ -604,7 +604,7 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 		return 0;
 	}
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					 v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
@@ -625,7 +625,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
+		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 					     v.iov_base, v.iov_len))
 			return false;
 		0;}),
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+	if (!unroll)
+		return;
+	i->count += unroll;
+	if (unlikely(i->type & ITER_PIPE)) {
+		struct pipe_inode_info *pipe = i->pipe;
+		int idx = i->idx;
+		size_t off = i->iov_offset;
+		while (1) {
+			size_t n = off - pipe->bufs[idx].offset;
+			if (unroll < n) {
+				off -= unroll;
+				break;
+			}
+			unroll -= n;
+			if (!unroll && idx == i->start_idx) {
+				off = 0;
+				break;
+			}
+			if (!idx--)
+				idx = pipe->buffers - 1;
+			off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+		}
+		i->iov_offset = off;
+		i->idx = idx;
+		pipe_truncate(i);
+		return;
+	}
+	if (unroll <= i->iov_offset) {
+		i->iov_offset -= unroll;
+		return;
+	}
+	unroll -= i->iov_offset;
+	if (i->type & ITER_BVEC) {
+		const struct bio_vec *bvec = i->bvec;
+		while (1) {
+			size_t n = (--bvec)->bv_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->bvec = bvec;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	} else { /* same logics for iovec and kvec */
+		const struct iovec *iov = i->iov;
+		while (1) {
+			size_t n = (--iov)->iov_len;
+			i->nr_segs++;
+			if (unroll <= n) {
+				i->iov = iov;
+				i->iov_offset = n - unroll;
+				return;
+			}
+			unroll -= n;
+		}
+	}
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
 /*
  * Return the count of just the current iov_iter segment.
  */
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
 	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
 	i->iov_offset = 0;
 	i->count = count;
+	i->start_idx = i->idx;
 }
 EXPORT_SYMBOL(iov_iter_pipe);
 
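
iov_iter_revert() gives callers a way to undo a partial advance: i->count grows back by 'unroll' and the segment cursor walks backwards through the iovec/kvec/bvec array (or, for ITER_PIPE, back toward the start_idx snapshot now taken in iov_iter_pipe()). That is what ->write_iter() style code needs when it copies data out of an iterator and a later step fails. A hedged sketch; submit_staged() and the surrounding helper are hypothetical:

	static ssize_t stage_write(void *buf, size_t bytes, struct iov_iter *from)
	{
		size_t copied = copy_from_iter(buf, bytes, from);

		if (submit_staged(buf, copied) < 0) {	/* hypothetical consumer */
			/* Put the consumed bytes back so the caller sees
			 * no progress on the iterator. */
			iov_iter_revert(from, copied);
			return -EIO;
		}
		return copied;
	}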
diff --git a/lib/kobject.c b/lib/kobject.c
index 445dcaeb0f56..763d70a18941 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -601,12 +601,15 @@ struct kobject *kobject_get(struct kobject *kobj)
 }
 EXPORT_SYMBOL(kobject_get);
 
-static struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj)
 {
+	if (!kobj)
+		return NULL;
 	if (!kref_get_unless_zero(&kobj->kref))
 		kobj = NULL;
 	return kobj;
 }
+EXPORT_SYMBOL(kobject_get_unless_zero);
 
 /*
  * kobject_cleanup - free kobject resources.
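
Un-staticing and exporting kobject_get_unless_zero() (plus the new NULL check) lets lookup paths outside kobject.c take a reference only when the object is not already on its way to being freed. The usual shape of such a lookup, as a hedged sketch with a hypothetical list and lock:

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical */
	static LIST_HEAD(example_list);		/* hypothetical */

	static struct kobject *find_and_get(const char *name)
	{
		struct kobject *k, *found = NULL;

		/* The lock only guards the list; the refcount may already
		 * have dropped to zero, in which case the entry is skipped. */
		spin_lock(&example_lock);
		list_for_each_entry(k, &example_list, entry) {
			if (!strcmp(kobject_name(k), name)) {
				found = kobject_get_unless_zero(k);
				break;
			}
		}
		spin_unlock(&example_lock);
		return found;	/* NULL if absent or already dying */
	}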
diff --git a/lib/md5.c b/lib/md5.c
deleted file mode 100644
index bb0cd01d356d..000000000000
--- a/lib/md5.c
+++ /dev/null
@@ -1,95 +0,0 @@
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/cryptohash.h>
-
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-#define MD5STEP(f, w, x, y, z, in, s) \
-	(w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
-
-void md5_transform(__u32 *hash, __u32 const *in)
-{
-	u32 a, b, c, d;
-
-	a = hash[0];
-	b = hash[1];
-	c = hash[2];
-	d = hash[3];
-
-	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
-	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
-	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
-	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
-	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
-	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
-	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
-	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
-	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
-	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
-	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
-	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
-	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
-	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
-	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
-	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
-	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
-	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
-	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
-	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
-	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
-	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
-	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
-	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
-	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
-	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
-	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
-	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
-	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
-	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
-	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
-	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
-	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
-	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
-	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
-	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
-	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
-	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
-	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
-	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
-	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
-	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
-	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
-	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
-	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
-	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
-	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
-	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
-	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
-	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
-	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
-	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
-	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
-	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
-	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
-	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
-	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
-	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
-	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
-	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
-	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
-	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
-	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
-	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
-	hash[0] += a;
-	hash[1] += b;
-	hash[2] += c;
-	hash[3] += d;
-}
-EXPORT_SYMBOL(md5_transform);
diff --git a/lib/nlattr.c b/lib/nlattr.c
index b42b8577fc23..a7e0b16078df 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -112,6 +112,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
  * @len: length of attribute stream
  * @maxtype: maximum attribute type to be expected
  * @policy: validation policy
+ * @extack: extended ACK report struct
  *
  * Validates all attributes in the specified attribute stream against the
  * specified policy. Attributes with a type exceeding maxtype will be
@@ -120,20 +121,23 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
  * Returns 0 on success or a negative error code.
  */
 int nla_validate(const struct nlattr *head, int len, int maxtype,
-		 const struct nla_policy *policy)
+		 const struct nla_policy *policy,
+		 struct netlink_ext_ack *extack)
 {
 	const struct nlattr *nla;
-	int rem, err;
+	int rem;
 
 	nla_for_each_attr(nla, head, len, rem) {
-		err = validate_nla(nla, maxtype, policy);
-		if (err < 0)
-			goto errout;
+		int err = validate_nla(nla, maxtype, policy);
+
+		if (err < 0) {
+			if (extack)
+				extack->bad_attr = nla;
+			return err;
+		}
 	}
 
-	err = 0;
-errout:
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(nla_validate);
 
@@ -180,7 +184,8 @@ EXPORT_SYMBOL(nla_policy_len);
  * Returns 0 on success or a negative error code.
  */
 int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
-	      int len, const struct nla_policy *policy)
+	      int len, const struct nla_policy *policy,
+	      struct netlink_ext_ack *extack)
 {
 	const struct nlattr *nla;
 	int rem, err;
@@ -193,8 +198,11 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
 		if (type > 0 && type <= maxtype) {
 			if (policy) {
 				err = validate_nla(nla, maxtype, policy);
-				if (err < 0)
+				if (err < 0) {
+					if (extack)
+						extack->bad_attr = nla;
 					goto errout;
+				}
 			}
 
 			tb[type] = (struct nlattr *)nla;
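
The new extack parameter threads netlink extended-ACK reporting through attribute validation: when validate_nla() rejects an attribute, the offending nlattr is recorded in extack->bad_attr, so the netlink core can point the sender at the exact bad attribute. A hedged sketch of a caller; the DEMO_* attribute space and policy are illustrative, only the nla_parse() signature comes from this patch:

	enum {				/* illustrative attribute space */
		DEMO_ATTR_UNSPEC,
		DEMO_ATTR_FOO,
		__DEMO_ATTR_MAX,
	};
	#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

	static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
		[DEMO_ATTR_FOO] = { .type = NLA_U32 },
	};

	static int demo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
	{
		struct nlattr *tb[DEMO_ATTR_MAX + 1];
		int err;

		err = nla_parse(tb, DEMO_ATTR_MAX, nlmsg_attrdata(nlh, 0),
				nlmsg_attrlen(nlh, 0), demo_policy, extack);
		if (err)
			return err;	/* extack->bad_attr now set */
		return 0;
	}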
diff --git a/lib/refcount.c b/lib/refcount.c
index aa09ad3c30b0..f42124ccf295 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,11 +37,29 @@
 #include <linux/refcount.h>
 #include <linux/bug.h>
 
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
 bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (!val)
 			return false;
 
@@ -51,12 +69,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 		new = val + i;
 		if (new < val)
 			new = UINT_MAX;
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
 
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
 	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -64,24 +78,45 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_add_not_zero);
 
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
 void refcount_add(unsigned int i, refcount_t *r)
 {
 	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_add);
 
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller has guaranteed the
  * object memory to be stable (RCU, etc.). It does provide a control dependency
  * and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
  */
 bool refcount_inc_not_zero(refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		new = val + 1;
 
 		if (!val)
@@ -90,12 +125,7 @@ bool refcount_inc_not_zero(refcount_t *r)
 		if (unlikely(!new))
 			return true;
 
-		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
 
 	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
 
@@ -103,11 +133,17 @@ bool refcount_inc_not_zero(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
 
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
  *
  * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
  */
 void refcount_inc(refcount_t *r)
 {
@@ -115,11 +151,31 @@ void refcount_inc(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_inc);
 
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at UINT_MAX.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (unlikely(val == UINT_MAX))
 			return false;
 
@@ -129,24 +185,24 @@ bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 			return false;
 		}
 
-		old = atomic_cmpxchg_release(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
 	return !new;
 }
 EXPORT_SYMBOL_GPL(refcount_sub_and_test);
 
-/*
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
  * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_and_test(refcount_t *r)
 {
@@ -154,21 +210,26 @@ bool refcount_dec_and_test(refcount_t *r)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_test);
 
-/*
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
  * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
  * when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-
 void refcount_dec(refcount_t *r)
 {
 	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL_GPL(refcount_dec);
 
-/*
+/**
+ * refcount_dec_if_one - decrement a refcount if it is 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
  * success thereof.
  *
@@ -178,24 +239,33 @@ EXPORT_SYMBOL_GPL(refcount_dec);
  * It can be used like a try-delete operator; this explicit case is provided
  * and not cmpxchg in generic, because that would allow implementing unsafe
  * operations.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
  */
 bool refcount_dec_if_one(refcount_t *r)
 {
-	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
+	int val = 1;
+
+	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
 }
 EXPORT_SYMBOL_GPL(refcount_dec_if_one);
 
-/*
+/**
+ * refcount_dec_not_one - decrement a refcount if it is not 1
+ * @r: the refcount
+ *
  * No atomic_t counterpart, it decrements unless the value is 1, in which case
  * it will return false.
  *
  * Was often done like: atomic_add_unless(&var, -1, 1)
+ *
+ * Return: true if the decrement operation was successful, false otherwise
  */
 bool refcount_dec_not_one(refcount_t *r)
 {
-	unsigned int old, new, val = atomic_read(&r->refs);
+	unsigned int new, val = atomic_read(&r->refs);
 
-	for (;;) {
+	do {
 		if (unlikely(val == UINT_MAX))
 			return true;
 
@@ -208,24 +278,27 @@ bool refcount_dec_not_one(refcount_t *r)
 			return true;
 		}
 
-		old = atomic_cmpxchg_release(&r->refs, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
 
 	return true;
 }
 EXPORT_SYMBOL_GPL(refcount_dec_not_one);
 
-/*
+/**
+ * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
+ *                               refcount to 0
+ * @r: the refcount
+ * @lock: the mutex to be locked
+ *
  * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
  * to decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true and hold mutex if able to decrement refcount to 0, false
+ * otherwise
  */
 bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 {
@@ -242,13 +315,21 @@ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
 }
 EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
 
-/*
+/**
+ * refcount_dec_and_lock - return holding spinlock if able to decrement
+ *                         refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ *
  * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
  * decrement when saturated at UINT_MAX.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
  * See the comment on top.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
  */
 bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 {
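
Every conversion above is the same mechanical rewrite: atomic_try_cmpxchg_*() stores the freshly observed counter value back into 'val' when the exchange fails, so the explicit 'old == val' test and 'val = old' reload disappear and the compiler can branch on the cmpxchg result directly. The shared loop shape, as a sketch in which compute_new() stands in for each function's saturation logic:

	static bool refcount_update_sketch(refcount_t *r)
	{
		unsigned int new, val = atomic_read(&r->refs);

		do {
			if (!val)
				return false;		/* counter already dead */
			new = compute_new(val);		/* illustrative */
			/* On failure, 'val' is refreshed with the value
			 * actually found in r->refs and the loop retries. */
		} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

		return true;
	}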
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index f8635fd57442..a930e436db5d 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -535,7 +535,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 	struct rhash_head *head;
 	int elasticity;
 
-	elasticity = ht->elasticity;
+	elasticity = RHT_ELASTICITY;
 	pprev = rht_bucket_var(tbl, hash);
 	rht_for_each_continue(head, *pprev, tbl, hash) {
 		struct rhlist_head *list;
@@ -958,35 +958,20 @@ int rhashtable_init(struct rhashtable *ht,
 	if (params->min_size)
 		ht->p.min_size = roundup_pow_of_two(params->min_size);
 
-	if (params->max_size)
-		ht->p.max_size = rounddown_pow_of_two(params->max_size);
+	/* Cap total entries at 2^31 to avoid nelems overflow. */
+	ht->max_elems = 1u << 31;
 
-	if (params->insecure_max_entries)
-		ht->p.insecure_max_entries =
-			rounddown_pow_of_two(params->insecure_max_entries);
-	else
-		ht->p.insecure_max_entries = ht->p.max_size * 2;
+	if (params->max_size) {
+		ht->p.max_size = rounddown_pow_of_two(params->max_size);
+		if (ht->p.max_size < ht->max_elems / 2)
+			ht->max_elems = ht->p.max_size * 2;
+	}
 
-	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
+	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
 
 	if (params->nelem_hint)
 		size = rounded_hashtable_size(&ht->p);
 
-	/* The maximum (not average) chain length grows with the
-	 * size of the hash table, at a rate of (log N)/(log log N).
-	 * The value of 16 is selected so that even if the hash
-	 * table grew to 2^32 you would not expect the maximum
-	 * chain length to exceed it unless we are under attack
-	 * (or extremely unlucky).
-	 *
-	 * As this limit is only to detect attacks, we don't need
-	 * to set it to a lower value as you'd need the chain
-	 * length to vastly exceed 16 to have any real effect
-	 * on the system.
-	 */
-	if (!params->insecure_elasticity)
-		ht->elasticity = 16;
-
 	if (params->locks_mul)
 		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
 	else
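
With insecure_max_entries and per-table elasticity gone, chain-length policing uses the fixed RHT_ELASTICITY constant and the element ceiling is derived from max_size alone: a table starts with a 2^31 cap on nelems, and a caller-supplied max_size lowers that cap to twice the table size. A worked sketch of the new init arithmetic; struct demo_obj and the parameter values are illustrative:

	struct demo_obj {
		u32 key;
		struct rhash_head node;
	};

	static const struct rhashtable_params demo_params = {
		.head_offset = offsetof(struct demo_obj, node),
		.key_offset  = offsetof(struct demo_obj, key),
		.key_len     = sizeof(u32),
		.max_size    = 1 << 16,
	};
	/* rhashtable_init(&ht, &demo_params) now computes:
	 *   ht->max_elems  = 1u << 31;            (default cap)
	 *   ht->p.max_size = 1 << 16;             (already a power of two)
	 *   since (1 << 16) < (1u << 31) / 2:
	 *   ht->max_elems  = 2 * (1 << 16) = 131072 entries.
	 */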
diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 60e800e0b5a0..80aa8d5463fa 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c | |||
| @@ -79,15 +79,15 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth) | |||
| 79 | } | 79 | } |
| 80 | EXPORT_SYMBOL_GPL(sbitmap_resize); | 80 | EXPORT_SYMBOL_GPL(sbitmap_resize); |
| 81 | 81 | ||
| 82 | static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint, | 82 | static int __sbitmap_get_word(unsigned long *word, unsigned long depth, |
| 83 | bool wrap) | 83 | unsigned int hint, bool wrap) |
| 84 | { | 84 | { |
| 85 | unsigned int orig_hint = hint; | 85 | unsigned int orig_hint = hint; |
| 86 | int nr; | 86 | int nr; |
| 87 | 87 | ||
| 88 | while (1) { | 88 | while (1) { |
| 89 | nr = find_next_zero_bit(&word->word, word->depth, hint); | 89 | nr = find_next_zero_bit(word, depth, hint); |
| 90 | if (unlikely(nr >= word->depth)) { | 90 | if (unlikely(nr >= depth)) { |
| 91 | /* | 91 | /* |
| 92 | * We started with an offset, and we didn't reset the | 92 | * We started with an offset, and we didn't reset the |
| 93 | * offset to 0 in a failure case, so start from 0 to | 93 | * offset to 0 in a failure case, so start from 0 to |
| @@ -100,11 +100,11 @@ static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint, | |||
| 100 | return -1; | 100 | return -1; |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | if (!test_and_set_bit(nr, &word->word)) | 103 | if (!test_and_set_bit(nr, word)) |
| 104 | break; | 104 | break; |
| 105 | 105 | ||
| 106 | hint = nr + 1; | 106 | hint = nr + 1; |
| 107 | if (hint >= word->depth - 1) | 107 | if (hint >= depth - 1) |
| 108 | hint = 0; | 108 | hint = 0; |
| 109 | } | 109 | } |
| 110 | 110 | ||
| @@ -119,7 +119,8 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) | |||
| 119 | index = SB_NR_TO_INDEX(sb, alloc_hint); | 119 | index = SB_NR_TO_INDEX(sb, alloc_hint); |
| 120 | 120 | ||
| 121 | for (i = 0; i < sb->map_nr; i++) { | 121 | for (i = 0; i < sb->map_nr; i++) { |
| 122 | nr = __sbitmap_get_word(&sb->map[index], | 122 | nr = __sbitmap_get_word(&sb->map[index].word, |
| 123 | sb->map[index].depth, | ||
| 123 | SB_NR_TO_BIT(sb, alloc_hint), | 124 | SB_NR_TO_BIT(sb, alloc_hint), |
| 124 | !round_robin); | 125 | !round_robin); |
| 125 | if (nr != -1) { | 126 | if (nr != -1) { |
| @@ -141,6 +142,37 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) | |||
| 141 | } | 142 | } |
| 142 | EXPORT_SYMBOL_GPL(sbitmap_get); | 143 | EXPORT_SYMBOL_GPL(sbitmap_get); |
| 143 | 144 | ||
| 145 | int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint, | ||
| 146 | unsigned long shallow_depth) | ||
| 147 | { | ||
| 148 | unsigned int i, index; | ||
| 149 | int nr = -1; | ||
| 150 | |||
| 151 | index = SB_NR_TO_INDEX(sb, alloc_hint); | ||
| 152 | |||
| 153 | for (i = 0; i < sb->map_nr; i++) { | ||
| 154 | nr = __sbitmap_get_word(&sb->map[index].word, | ||
| 155 | min(sb->map[index].depth, shallow_depth), | ||
| 156 | SB_NR_TO_BIT(sb, alloc_hint), true); | ||
| 157 | if (nr != -1) { | ||
| 158 | nr += index << sb->shift; | ||
| 159 | break; | ||
| 160 | } | ||
| 161 | |||
| 162 | /* Jump to next index. */ | ||
| 163 | index++; | ||
| 164 | alloc_hint = index << sb->shift; | ||
| 165 | |||
| 166 | if (index >= sb->map_nr) { | ||
| 167 | index = 0; | ||
| 168 | alloc_hint = 0; | ||
| 169 | } | ||
| 170 | } | ||
| 171 | |||
| 172 | return nr; | ||
| 173 | } | ||
| 174 | EXPORT_SYMBOL_GPL(sbitmap_get_shallow); | ||
| 175 | |||
| 144 | bool sbitmap_any_bit_set(const struct sbitmap *sb) | 176 | bool sbitmap_any_bit_set(const struct sbitmap *sb) |
| 145 | { | 177 | { |
| 146 | unsigned int i; | 178 | unsigned int i; |
| @@ -342,6 +374,35 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq) | |||
| 342 | } | 374 | } |
| 343 | EXPORT_SYMBOL_GPL(__sbitmap_queue_get); | 375 | EXPORT_SYMBOL_GPL(__sbitmap_queue_get); |
| 344 | 376 | ||
| 377 | int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, | ||
| 378 | unsigned int shallow_depth) | ||
| 379 | { | ||
| 380 | unsigned int hint, depth; | ||
| 381 | int nr; | ||
| 382 | |||
| 383 | hint = this_cpu_read(*sbq->alloc_hint); | ||
| 384 | depth = READ_ONCE(sbq->sb.depth); | ||
| 385 | if (unlikely(hint >= depth)) { | ||
| 386 | hint = depth ? prandom_u32() % depth : 0; | ||
| 387 | this_cpu_write(*sbq->alloc_hint, hint); | ||
| 388 | } | ||
| 389 | nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth); | ||
| 390 | |||
| 391 | if (nr == -1) { | ||
| 392 | /* If the map is full, a hint won't do us much good. */ | ||
| 393 | this_cpu_write(*sbq->alloc_hint, 0); | ||
| 394 | } else if (nr == hint || unlikely(sbq->round_robin)) { | ||
| 395 | /* Only update the hint if we used it. */ | ||
| 396 | hint = nr + 1; | ||
| 397 | if (hint >= depth - 1) | ||
| 398 | hint = 0; | ||
| 399 | this_cpu_write(*sbq->alloc_hint, hint); | ||
| 400 | } | ||
| 401 | |||
| 402 | return nr; | ||
| 403 | } | ||
| 404 | EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow); | ||
| 405 | |||
| 345 | static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) | 406 | static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq) |
| 346 | { | 407 | { |
| 347 | int i, wake_index; | 408 | int i, wake_index; |
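A hedged usage sketch of the queue-level wrapper (sbq and budget are assumed names): like __sbitmap_queue_get(), it consults the per-CPU allocation hint, clamps it to the current depth, and only advances the hint when the allocation actually landed on it, but it passes the caller's shallow budget down to sbitmap_get_shallow():

	/* Hypothetical: take a tag, but only from the shallow region. */
	int tag = __sbitmap_queue_get_shallow(sbq, budget);
	if (tag < 0)
		return -EAGAIN;	/* shallow region exhausted */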
diff --git a/lib/string.c b/lib/string.c index ed83562a53ae..b5c9a1168d3a 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -131,7 +131,7 @@ EXPORT_SYMBOL(strncpy); | |||
| 131 | * @src: Where to copy the string from | 131 | * @src: Where to copy the string from |
| 132 | * @size: size of destination buffer | 132 | * @size: size of destination buffer |
| 133 | * | 133 | * |
| 134 | * Compatible with *BSD: the result is always a valid | 134 | * Compatible with ``*BSD``: the result is always a valid |
| 135 | * NUL-terminated string that fits in the buffer (unless, | 135 | * NUL-terminated string that fits in the buffer (unless, |
| 136 | * of course, the buffer size is zero). It does not pad | 136 | * of course, the buffer size is zero). It does not pad |
| 137 | * out the result like strncpy() does. | 137 | * out the result like strncpy() does. |
diff --git a/lib/syscall.c b/lib/syscall.c index 17d5ff5fa6a3..2c6cd1b5c3ea 100644 --- a/lib/syscall.c +++ b/lib/syscall.c | |||
| @@ -12,6 +12,7 @@ static int collect_syscall(struct task_struct *target, long *callno, | |||
| 12 | 12 | ||
| 13 | if (!try_get_task_stack(target)) { | 13 | if (!try_get_task_stack(target)) { |
| 14 | /* Task has no stack, so the task isn't in a syscall. */ | 14 | /* Task has no stack, so the task isn't in a syscall. */ |
| 15 | *sp = *pc = 0; | ||
| 15 | *callno = -1; | 16 | *callno = -1; |
| 16 | return 0; | 17 | return 0; |
| 17 | } | 18 | } |
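The added line matters because callers read *sp and *pc unconditionally. A sketch of the consumer pattern, using the task_current_syscall() signature of this kernel era (task, m, and the seq_printf usage are illustrative assumptions):

	unsigned long args[6], sp, pc;
	long callno;

	/* With the fix, sp and pc are guaranteed to be 0 rather than
	 * leftover kernel stack contents when the task has no stack. */
	if (task_current_syscall(task, &callno, args, 6, &sp, &pc) == 0)
		seq_printf(m, "%ld 0x%lx 0x%lx\n", callno, sp, pc);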
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 0362da0b66c3..a0f66280ea50 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
| @@ -434,6 +434,41 @@ loop: | |||
| 434 | return 0; | 434 | return 0; |
| 435 | } | 435 | } |
| 436 | 436 | ||
| 437 | static int __bpf_fill_stxdw(struct bpf_test *self, int size) | ||
| 438 | { | ||
| 439 | unsigned int len = BPF_MAXINSNS; | ||
| 440 | struct bpf_insn *insn; | ||
| 441 | int i; | ||
| 442 | |||
| 443 | insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); | ||
| 444 | if (!insn) | ||
| 445 | return -ENOMEM; | ||
| 446 | |||
| 447 | insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1); | ||
| 448 | insn[1] = BPF_ST_MEM(size, R10, -40, 42); | ||
| 449 | |||
| 450 | for (i = 2; i < len - 2; i++) | ||
| 451 | insn[i] = BPF_STX_XADD(size, R10, R0, -40); | ||
| 452 | |||
| 453 | insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40); | ||
| 454 | insn[len - 1] = BPF_EXIT_INSN(); | ||
| 455 | |||
| 456 | self->u.ptr.insns = insn; | ||
| 457 | self->u.ptr.len = len; | ||
| 458 | |||
| 459 | return 0; | ||
| 460 | } | ||
| 461 | |||
| 462 | static int bpf_fill_stxw(struct bpf_test *self) | ||
| 463 | { | ||
| 464 | return __bpf_fill_stxdw(self, BPF_W); | ||
| 465 | } | ||
| 466 | |||
| 467 | static int bpf_fill_stxdw(struct bpf_test *self) | ||
| 468 | { | ||
| 469 | return __bpf_fill_stxdw(self, BPF_DW); | ||
| 470 | } | ||
| 471 | |||
| 437 | static struct bpf_test tests[] = { | 472 | static struct bpf_test tests[] = { |
| 438 | { | 473 | { |
| 439 | "TAX", | 474 | "TAX", |
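For reference, BPF_STX_XADD(size, R10, R0, -40) is an atomic add into the stack slot at R10 - 40. In illustrative C, with the GCC atomic builtin standing in for the interpreter/JIT implementation:

	/* Illustrative equivalent of BPF_STX_XADD(BPF_W, R10, R0, -40).
	 * The source register r0 is not modified, which the
	 * "side-effects" tests added below verify explicitly. */
	__atomic_fetch_add((u32 *)(r10 - 40), r0, __ATOMIC_RELAXED);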
| @@ -4303,6 +4338,41 @@ static struct bpf_test tests[] = { | |||
| 4303 | { { 0, 0x22 } }, | 4338 | { { 0, 0x22 } }, |
| 4304 | }, | 4339 | }, |
| 4305 | { | 4340 | { |
| 4341 | "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22", | ||
| 4342 | .u.insns_int = { | ||
| 4343 | BPF_ALU64_REG(BPF_MOV, R1, R10), | ||
| 4344 | BPF_ALU32_IMM(BPF_MOV, R0, 0x12), | ||
| 4345 | BPF_ST_MEM(BPF_W, R10, -40, 0x10), | ||
| 4346 | BPF_STX_XADD(BPF_W, R10, R0, -40), | ||
| 4347 | BPF_ALU64_REG(BPF_MOV, R0, R10), | ||
| 4348 | BPF_ALU64_REG(BPF_SUB, R0, R1), | ||
| 4349 | BPF_EXIT_INSN(), | ||
| 4350 | }, | ||
| 4351 | INTERNAL, | ||
| 4352 | { }, | ||
| 4353 | { { 0, 0 } }, | ||
| 4354 | }, | ||
| 4355 | { | ||
| 4356 | "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22", | ||
| 4357 | .u.insns_int = { | ||
| 4358 | BPF_ALU32_IMM(BPF_MOV, R0, 0x12), | ||
| 4359 | BPF_ST_MEM(BPF_W, R10, -40, 0x10), | ||
| 4360 | BPF_STX_XADD(BPF_W, R10, R0, -40), | ||
| 4361 | BPF_EXIT_INSN(), | ||
| 4362 | }, | ||
| 4363 | INTERNAL, | ||
| 4364 | { }, | ||
| 4365 | { { 0, 0x12 } }, | ||
| 4366 | }, | ||
| 4367 | { | ||
| 4368 | "STX_XADD_W: X + 1 + 1 + 1 + ...", | ||
| 4369 | { }, | ||
| 4370 | INTERNAL, | ||
| 4371 | { }, | ||
| 4372 | { { 0, 4134 } }, | ||
| 4373 | .fill_helper = bpf_fill_stxw, | ||
| 4374 | }, | ||
| 4375 | { | ||
| 4306 | "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22", | 4376 | "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22", |
| 4307 | .u.insns_int = { | 4377 | .u.insns_int = { |
| 4308 | BPF_ALU32_IMM(BPF_MOV, R0, 0x12), | 4378 | BPF_ALU32_IMM(BPF_MOV, R0, 0x12), |
| @@ -4315,6 +4385,41 @@ static struct bpf_test tests[] = { | |||
| 4315 | { }, | 4385 | { }, |
| 4316 | { { 0, 0x22 } }, | 4386 | { { 0, 0x22 } }, |
| 4317 | }, | 4387 | }, |
| 4388 | { | ||
| 4389 | "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22", | ||
| 4390 | .u.insns_int = { | ||
| 4391 | BPF_ALU64_REG(BPF_MOV, R1, R10), | ||
| 4392 | BPF_ALU32_IMM(BPF_MOV, R0, 0x12), | ||
| 4393 | BPF_ST_MEM(BPF_DW, R10, -40, 0x10), | ||
| 4394 | BPF_STX_XADD(BPF_DW, R10, R0, -40), | ||
| 4395 | BPF_ALU64_REG(BPF_MOV, R0, R10), | ||
| 4396 | BPF_ALU64_REG(BPF_SUB, R0, R1), | ||
| 4397 | BPF_EXIT_INSN(), | ||
| 4398 | }, | ||
| 4399 | INTERNAL, | ||
| 4400 | { }, | ||
| 4401 | { { 0, 0 } }, | ||
| 4402 | }, | ||
| 4403 | { | ||
| 4404 | "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22", | ||
| 4405 | .u.insns_int = { | ||
| 4406 | BPF_ALU32_IMM(BPF_MOV, R0, 0x12), | ||
| 4407 | BPF_ST_MEM(BPF_DW, R10, -40, 0x10), | ||
| 4408 | BPF_STX_XADD(BPF_DW, R10, R0, -40), | ||
| 4409 | BPF_EXIT_INSN(), | ||
| 4410 | }, | ||
| 4411 | INTERNAL, | ||
| 4412 | { }, | ||
| 4413 | { { 0, 0x12 } }, | ||
| 4414 | }, | ||
| 4415 | { | ||
| 4416 | "STX_XADD_DW: X + 1 + 1 + 1 + ...", | ||
| 4417 | { }, | ||
| 4418 | INTERNAL, | ||
| 4419 | { }, | ||
| 4420 | { { 0, 4134 } }, | ||
| 4421 | .fill_helper = bpf_fill_stxdw, | ||
| 4422 | }, | ||
| 4318 | /* BPF_JMP | BPF_EXIT */ | 4423 | /* BPF_JMP | BPF_EXIT */ |
| 4319 | { | 4424 | { |
| 4320 | "JMP_EXIT", | 4425 | "JMP_EXIT", |
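The expected value 4134 in both "X + 1 + 1 + ..." tests follows directly from the fill helper above:

	/* len = BPF_MAXINSNS = 4096
	 * insn[0]          MOV  R0, 1
	 * insn[1]          ST   [R10-40], 42
	 * insn[2..4093]    4092 x XADD [R10-40] += R0
	 * insn[4094..4095] LDX + EXIT
	 * => 42 + 4092 * 1 = 4134
	 */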
| @@ -4656,6 +4761,51 @@ static struct bpf_test tests[] = { | |||
| 4656 | { }, | 4761 | { }, |
| 4657 | { { 0, 1 } }, | 4762 | { { 0, 1 } }, |
| 4658 | }, | 4763 | }, |
| 4764 | { | ||
| 4765 | /* Mainly testing JIT + imm64 here. */ | ||
| 4766 | "JMP_JGE_X: ldimm64 test 1", | ||
| 4767 | .u.insns_int = { | ||
| 4768 | BPF_ALU32_IMM(BPF_MOV, R0, 0), | ||
| 4769 | BPF_LD_IMM64(R1, 3), | ||
| 4770 | BPF_LD_IMM64(R2, 2), | ||
| 4771 | BPF_JMP_REG(BPF_JGE, R1, R2, 2), | ||
| 4772 | BPF_LD_IMM64(R0, 0xffffffffffffffffUL), | ||
| 4773 | BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL), | ||
| 4774 | BPF_EXIT_INSN(), | ||
| 4775 | }, | ||
| 4776 | INTERNAL, | ||
| 4777 | { }, | ||
| 4778 | { { 0, 0xeeeeeeeeU } }, | ||
| 4779 | }, | ||
| 4780 | { | ||
| 4781 | "JMP_JGE_X: ldimm64 test 2", | ||
| 4782 | .u.insns_int = { | ||
| 4783 | BPF_ALU32_IMM(BPF_MOV, R0, 0), | ||
| 4784 | BPF_LD_IMM64(R1, 3), | ||
| 4785 | BPF_LD_IMM64(R2, 2), | ||
| 4786 | BPF_JMP_REG(BPF_JGE, R1, R2, 0), | ||
| 4787 | BPF_LD_IMM64(R0, 0xffffffffffffffffUL), | ||
| 4788 | BPF_EXIT_INSN(), | ||
| 4789 | }, | ||
| 4790 | INTERNAL, | ||
| 4791 | { }, | ||
| 4792 | { { 0, 0xffffffffU } }, | ||
| 4793 | }, | ||
| 4794 | { | ||
| 4795 | "JMP_JGE_X: ldimm64 test 3", | ||
| 4796 | .u.insns_int = { | ||
| 4797 | BPF_ALU32_IMM(BPF_MOV, R0, 1), | ||
| 4798 | BPF_LD_IMM64(R1, 3), | ||
| 4799 | BPF_LD_IMM64(R2, 2), | ||
| 4800 | BPF_JMP_REG(BPF_JGE, R1, R2, 4), | ||
| 4801 | BPF_LD_IMM64(R0, 0xffffffffffffffffUL), | ||
| 4802 | BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL), | ||
| 4803 | BPF_EXIT_INSN(), | ||
| 4804 | }, | ||
| 4805 | INTERNAL, | ||
| 4806 | { }, | ||
| 4807 | { { 0, 1 } }, | ||
| 4808 | }, | ||
| 4659 | /* BPF_JMP | BPF_JNE | BPF_X */ | 4809 | /* BPF_JMP | BPF_JNE | BPF_X */ |
| 4660 | { | 4810 | { |
| 4661 | "JMP_JNE_X: if (3 != 2) return 1", | 4811 | "JMP_JNE_X: if (3 != 2) return 1", |
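The three ldimm64 tests above exercise JIT jump-offset accounting: BPF_LD_IMM64 occupies two instruction slots, and a BPF program returns the low 32 bits of R0, so the expected values work out as follows:

	/* test 1: JGE 3 >= 2 taken, off=2 skips one two-slot ldimm64
	 *         -> R0 = 0xeeeeeeeeeeeeeeee, returns 0xeeeeeeee
	 * test 2: off=0 falls through to the 0xff.. load
	 *         -> returns 0xffffffff
	 * test 3: off=4 skips both two-slot loads -> R0 stays 1
	 */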
diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 0b1d3140fbb8..a25c9763fce1 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/string.h> | 20 | #include <linux/string.h> |
| 21 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
| 22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
| 23 | #include <linux/kasan.h> | ||
| 23 | 24 | ||
| 24 | /* | 25 | /* |
| 25 | * Note: test functions are marked noinline so that their names appear in | 26 | * Note: test functions are marked noinline so that their names appear in |
| @@ -474,6 +475,12 @@ static noinline void __init use_after_scope_test(void) | |||
| 474 | 475 | ||
| 475 | static int __init kmalloc_tests_init(void) | 476 | static int __init kmalloc_tests_init(void) |
| 476 | { | 477 | { |
| 478 | /* | ||
| 479 | * Temporarily enable multi-shot mode. Otherwise, we'd only get a | ||
| 480 | * report for the first case. | ||
| 481 | */ | ||
| 482 | bool multishot = kasan_save_enable_multi_shot(); | ||
| 483 | |||
| 477 | kmalloc_oob_right(); | 484 | kmalloc_oob_right(); |
| 478 | kmalloc_oob_left(); | 485 | kmalloc_oob_left(); |
| 479 | kmalloc_node_oob_right(); | 486 | kmalloc_node_oob_right(); |
| @@ -499,6 +506,9 @@ static int __init kmalloc_tests_init(void) | |||
| 499 | ksize_unpoisons_memory(); | 506 | ksize_unpoisons_memory(); |
| 500 | copy_user_test(); | 507 | copy_user_test(); |
| 501 | use_after_scope_test(); | 508 | use_after_scope_test(); |
| 509 | |||
| 510 | kasan_restore_multi_shot(multishot); | ||
| 511 | |||
| 502 | return -EAGAIN; | 512 | return -EAGAIN; |
| 503 | } | 513 | } |
| 504 | 514 | ||
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c index 1a8d71a68531..4621db801b23 100644 --- a/lib/test_user_copy.c +++ b/lib/test_user_copy.c | |||
| @@ -31,7 +31,6 @@ | |||
| 31 | * their capability at compile-time, we just have to opt-out certain archs. | 31 | * their capability at compile-time, we just have to opt-out certain archs. |
| 32 | */ | 32 | */ |
| 33 | #if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \ | 33 | #if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \ |
| 34 | !defined(CONFIG_AVR32) && \ | ||
| 35 | !defined(CONFIG_BLACKFIN) && \ | 34 | !defined(CONFIG_BLACKFIN) && \ |
| 36 | !defined(CONFIG_M32R) && \ | 35 | !defined(CONFIG_M32R) && \ |
| 37 | !defined(CONFIG_M68K) && \ | 36 | !defined(CONFIG_M68K) && \ |
diff --git a/lib/usercopy.c b/lib/usercopy.c new file mode 100644 index 000000000000..1b6010a3beb8 --- /dev/null +++ b/lib/usercopy.c | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | #include <linux/uaccess.h> | ||
| 2 | |||
| 3 | /* out-of-line parts */ | ||
| 4 | |||
| 5 | #ifndef INLINE_COPY_FROM_USER | ||
| 6 | unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) | ||
| 7 | { | ||
| 8 | unsigned long res = n; | ||
| 9 | if (likely(access_ok(VERIFY_READ, from, n))) | ||
| 10 | res = raw_copy_from_user(to, from, n); | ||
| 11 | if (unlikely(res)) | ||
| 12 | memset(to + (n - res), 0, res); | ||
| 13 | return res; | ||
| 14 | } | ||
| 15 | EXPORT_SYMBOL(_copy_from_user); | ||
| 16 | #endif | ||
| 17 | |||
| 18 | #ifndef INLINE_COPY_TO_USER | ||
| 19 | unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) | ||
| 20 | { | ||
| 21 | if (likely(access_ok(VERIFY_WRITE, to, n))) | ||
| 22 | n = raw_copy_to_user(to, from, n); | ||
| 23 | return n; | ||
| 24 | } | ||
| 25 | EXPORT_SYMBOL(_copy_to_user); | ||
| 26 | #endif | ||
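_copy_from_user() returns the number of bytes it could not copy and zeroes the corresponding tail of the kernel buffer, so the canonical caller idiom stays safe even on a partial fault (sketch; buf and ubuf assumed):

	char buf[64];

	/* On a partial fault the uncopied tail of buf has already been
	 * zeroed, so no uninitialized kernel memory can leak out. */
	if (copy_from_user(buf, ubuf, sizeof(buf)))
		return -EFAULT;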
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index e3bf4e0f10b5..176641cc549d 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -1954,13 +1954,13 @@ set_precision(struct printf_spec *spec, int prec) | |||
| 1954 | * This function generally follows C99 vsnprintf, but has some | 1954 | * This function generally follows C99 vsnprintf, but has some |
| 1955 | * extensions and a few limitations: | 1955 | * extensions and a few limitations: |
| 1956 | * | 1956 | * |
| 1957 | * %n is unsupported | 1957 | * - ``%n`` is unsupported |
| 1958 | * %p* is handled by pointer() | 1958 | * - ``%p*`` is handled by pointer() |
| 1959 | * | 1959 | * |
| 1960 | * See pointer() or Documentation/printk-formats.txt for more | 1960 | * See pointer() or Documentation/printk-formats.txt for more |
| 1961 | * extensive description. | 1961 | * extensive description. |
| 1962 | * | 1962 | * |
| 1963 | * ** Please update the documentation in both places when making changes ** | 1963 | * **Please update the documentation in both places when making changes** |
| 1964 | * | 1964 | * |
| 1965 | * The return value is the number of characters which would | 1965 | * The return value is the number of characters which would |
| 1966 | * be generated for the given input, excluding the trailing | 1966 | * be generated for the given input, excluding the trailing |
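That return-value contract is what makes the common truncation check work; a brief sketch (fmt and args assumed):

	char buf[16];
	int len = vsnprintf(buf, sizeof(buf), fmt, args);

	if (len >= (int)sizeof(buf))
		return -E2BIG;	/* len is the untruncated length */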
