Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug                  7
-rw-r--r--   lib/iov_iter.c                     4
-rw-r--r--   lib/lzo/lzo1x_compress.c           9
-rw-r--r--   lib/lzo/lzo1x_decompress_safe.c    4
-rw-r--r--   lib/string.c                      20
-rw-r--r--   lib/syscall.c                     57
-rw-r--r--   lib/test_vmalloc.c                 6
7 files changed, 63 insertions, 44 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0d9e81779e37..d5a4a4036d2f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -753,9 +753,9 @@ endmenu # "Memory Debugging"
 config ARCH_HAS_KCOV
 	bool
 	help
-	  KCOV does not have any arch-specific code, but currently it is enabled
-	  only for x86_64. KCOV requires testing on other archs, and most likely
-	  disabling of instrumentation for some early boot code.
+	  An architecture should select this when it can successfully
+	  build and run with CONFIG_KCOV. This typically requires
+	  disabling instrumentation for some early boot code.
 
 config CC_HAS_SANCOV_TRACE_PC
 	def_bool $(cc-option,-fsanitize-coverage=trace-pc)
@@ -1929,6 +1929,7 @@ config TEST_KMOD
 	depends on m
 	depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
 	depends on NETDEVICES && NET_CORE && INET # for TUN
+	depends on BLOCK
 	select TEST_LKM
 	select XFS_FS
 	select TUN
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index ea36dc355da1..b396d328a764 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1528,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
 		struct iov_iter *i)
 {
+#ifdef CONFIG_CRYPTO
 	struct ahash_request *hash = hashp;
 	struct scatterlist sg;
 	size_t copied;
@@ -1537,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
 	ahash_request_set_crypt(hash, &sg, NULL, copied);
 	crypto_ahash_update(hash);
 	return copied;
+#else
+	return 0;
+#endif
 }
 EXPORT_SYMBOL(hash_and_copy_to_iter);
 
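
The guard above exists so the file still builds and links when the crypto layer is disabled. A minimal userspace sketch of the same stub-out pattern; HAVE_FANCY_HASH and fancy_hash_update() are invented names for illustration, not kernel or libc APIs:

#include <stdio.h>
#include <string.h>

/*
 * When the optional subsystem is not configured, the function keeps its
 * symbol but compiles to a do-nothing body instead of referencing
 * symbols that do not exist in that configuration.
 */
static size_t hash_and_copy(void *dst, const void *src, size_t len)
{
#ifdef HAVE_FANCY_HASH
	memcpy(dst, src, len);
	fancy_hash_update(src, len);	/* only exists in the "enabled" build */
	return len;
#else
	(void)dst; (void)src; (void)len;
	return 0;			/* disabled: report nothing copied */
#endif
}

int main(void)
{
	char buf[8];

	printf("copied %zu bytes\n", hash_and_copy(buf, "data", 4));
	return 0;
}
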
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 4525fb094844..a8ede77afe0d 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 {
 	const unsigned char *ip = in;
 	unsigned char *op = out;
+	unsigned char *data_start;
 	size_t l = in_len;
 	size_t t = 0;
 	signed char state_offset = -2;
 	unsigned int m4_max_offset;
 
-	// LZO v0 will never write 17 as first byte,
-	// so this is used to version the bitstream
+	// LZO v0 will never write 17 as first byte (except for zero-length
+	// input), so this is used to version the bitstream
 	if (bitstream_version > 0) {
 		*op++ = 17;
 		*op++ = bitstream_version;
@@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 		m4_max_offset = M4_MAX_OFFSET_V0;
 	}
 
+	data_start = op;
+
 	while (l > 20) {
 		size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
 		uintptr_t ll_end = (uintptr_t) ip + ll;
@@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 	if (t > 0) {
 		const unsigned char *ii = in + in_len - t;
 
-		if (op == out && t <= 238) {
+		if (op == data_start && t <= 238) {
 			*op++ = (17 + t);
 		} else if (t <= 3) {
 			op[state_offset] |= t;
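
For context on the data_start change: once a versioned stream prepends a two-byte header, "no compressed data emitted yet" can no longer be tested with op == out. A rough, self-contained sketch of that bookkeeping (buffer size and literal length are arbitrary, not the kernel's values):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned char out[64];
	unsigned char *op = out;
	unsigned int bitstream_version = 1;	/* pretend we emit a v1 stream */
	size_t t = 5;				/* pretend trailing-literal length */

	if (bitstream_version > 0) {
		*op++ = 17;			/* v0 never writes 17 first, so it marks v1+ */
		*op++ = (unsigned char)bitstream_version;
	}

	unsigned char *data_start = op;		/* first byte of compressed data */

	/*
	 * When flushing the final literal run, "have we emitted anything yet?"
	 * must compare against data_start: "op == out" can never be true once
	 * the two header bytes have been written.
	 */
	if (op == data_start && t <= 238)
		*op++ = (unsigned char)(17 + t);

	printf("wrote %td byte(s) after the header\n", op - data_start);
	return 0;
}
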
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 6d2600ea3b55..9e07e9ef1aad 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
 	if (unlikely(in_len < 3))
 		goto input_overrun;
 
-	if (likely(*ip == 17)) {
+	if (likely(in_len >= 5) && likely(*ip == 17)) {
 		bitstream_version = ip[1];
 		ip += 2;
-		if (unlikely(in_len < 5))
-			goto input_overrun;
 	} else {
 		bitstream_version = 0;
 	}
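
A standalone sketch of the reordered check on the decompression side: the input length is tested before the two optional header bytes are read, and a too-short buffer is simply treated as an unversioned (v0) stream. read_version() is an illustrative helper, not the kernel routine:

#include <stdio.h>
#include <stddef.h>

static int read_version(const unsigned char **pp, size_t in_len)
{
	const unsigned char *ip = *pp;
	int version = 0;

	if (in_len >= 5 && ip[0] == 17) {	/* length checked before dereferencing header */
		version = ip[1];
		ip += 2;
	}
	*pp = ip;
	return version;
}

int main(void)
{
	const unsigned char buf[] = { 17, 1, 0, 0, 0 };
	const unsigned char *p = buf;

	printf("version %d, %td header byte(s) consumed\n",
	       read_version(&p, sizeof(buf)), p - buf);
	return 0;
}
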
diff --git a/lib/string.c b/lib/string.c
index 38e4ca08e757..3ab861c1a857 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
 EXPORT_SYMBOL(memcmp);
 #endif
 
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+	return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
 #ifndef __HAVE_ARCH_MEMSCAN
 /**
  * memscan - Find a character in an area of memory.
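
Given the kerneldoc's warning that only the zero versus non-zero distinction is meaningful, a caller-side sketch; buf_equal_cmp() is a local stand-in for bcmp() so the example builds anywhere, not the kernel symbol:

#include <stdio.h>
#include <string.h>

static int buf_equal_cmp(const void *a, const void *b, size_t len)
{
	return memcmp(a, b, len);	/* same contract: only == 0 is meaningful */
}

int main(void)
{
	const char a[] = "abcd";
	const char b[] = "abce";

	if (buf_equal_cmp(a, b, 4) == 0)
		puts("buffers identical");
	else
		puts("buffers differ");	/* do not interpret the sign of the result */

	return 0;
}
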
diff --git a/lib/syscall.c b/lib/syscall.c
index 1a7077f20eae..fb328e7ccb08 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -5,16 +5,14 @@
 #include <linux/export.h>
 #include <asm/syscall.h>
 
-static int collect_syscall(struct task_struct *target, long *callno,
-			   unsigned long args[6], unsigned int maxargs,
-			   unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
 {
 	struct pt_regs *regs;
 
 	if (!try_get_task_stack(target)) {
 		/* Task has no stack, so the task isn't in a syscall. */
-		*sp = *pc = 0;
-		*callno = -1;
+		memset(info, 0, sizeof(*info));
+		info->data.nr = -1;
 		return 0;
 	}
 
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
 		return -EAGAIN;
 	}
 
-	*sp = user_stack_pointer(regs);
-	*pc = instruction_pointer(regs);
+	info->sp = user_stack_pointer(regs);
+	info->data.instruction_pointer = instruction_pointer(regs);
 
-	*callno = syscall_get_nr(target, regs);
-	if (*callno != -1L && maxargs > 0)
-		syscall_get_arguments(target, regs, 0, maxargs, args);
+	info->data.nr = syscall_get_nr(target, regs);
+	if (info->data.nr != -1L)
+		syscall_get_arguments(target, regs,
+				      (unsigned long *)&info->data.args[0]);
 
 	put_task_stack(target);
 	return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
 /**
  * task_current_syscall - Discover what a blocked task is doing.
  * @target:		thread to examine
- * @callno:		filled with system call number or -1
- * @args:		filled with @maxargs system call arguments
- * @maxargs:		number of elements in @args to fill
- * @sp:			filled with user stack pointer
- * @pc:			filled with user PC
+ * @info:		structure with the following fields:
+ *			 .sp        - filled with user stack pointer
+ *			 .data.nr   - filled with system call number or -1
+ *			 .data.args - filled with @maxargs system call arguments
+ *			 .data.instruction_pointer - filled with user PC
  *
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
- * call is still in progress. Note we may get this result if @target
- * has finished its system call but not yet returned to user mode, such
- * as when it's stopped for signal handling or syscall exit tracing.
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
+ * call is still in progress. Note we may get this result if @target
+ * has finished its system call but not yet returned to user mode, such
+ * as when it's stopped for signal handling or syscall exit tracing.
  *
- * If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * If @target is blocked in the kernel during a fault or exception,
+ * returns zero with *@info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
  *
  * Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
  */
-int task_current_syscall(struct task_struct *target, long *callno,
-			 unsigned long args[6], unsigned int maxargs,
-			 unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
 {
 	long state;
 	unsigned long ncsw;
 
-	if (unlikely(maxargs > 6))
-		return -EINVAL;
-
 	if (target == current)
-		return collect_syscall(target, callno, args, maxargs, sp, pc);
+		return collect_syscall(target, info);
 
 	state = target->state;
 	if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
 
 	ncsw = wait_task_inactive(target, state);
 	if (unlikely(!ncsw) ||
-	    unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+	    unlikely(collect_syscall(target, info)) ||
 	    unlikely(wait_task_inactive(target, state) != ncsw))
 		return -EAGAIN;
 
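
With the new signature a caller fills a single struct syscall_info rather than five output parameters. A sketch of such a call site, using only the fields the patch itself touches; report_blocked_syscall() and the printk format are invented for illustration, and the usual headers for task_struct and syscall_info are assumed to be included:

/*
 * Report what a (presumably blocked) task is doing, per the kerneldoc above:
 * -EAGAIN means the task did not stay blocked; nr == -1 means it is in a
 * fault or exception, so the argument array was not filled in.
 */
static void report_blocked_syscall(struct task_struct *task)
{
	struct syscall_info info;

	if (task_current_syscall(task, &info))
		return;				/* -EAGAIN: task did not stay blocked */

	if (info.data.nr == -1)
		return;				/* fault/exception: args not filled in */

	pr_info("pid %d: syscall %d sp=%llx pc=%llx arg0=%llx\n",
		task->pid, info.data.nr,
		(unsigned long long)info.sp,
		(unsigned long long)info.data.instruction_pointer,
		(unsigned long long)info.data.args[0]);
}
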
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 83cdcaa82bf6..f832b095afba 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
 static int test_func(void *private)
 {
 	struct test_driver *t = private;
-	cpumask_t newmask = CPU_MASK_NONE;
 	int random_array[ARRAY_SIZE(test_case_array)];
 	int index, i, j, ret;
 	ktime_t kt;
 	u64 delta;
 
-	cpumask_set_cpu(t->cpu, &newmask);
-	set_cpus_allowed_ptr(current, &newmask);
+	ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
+	if (ret < 0)
+		pr_err("Failed to set affinity to %d CPU\n", t->cpu);
 
 	for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
 		random_array[i] = i;
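
The rewritten affinity call avoids putting a cpumask_t (potentially large with big NR_CPUS) on the stack by using the constant mask returned by cpumask_of(), and it now checks the return value. A brief sketch of the idiom; pin_to_cpu() is an illustrative wrapper, not part of the patch:

/* Pin the current kthread to one CPU without a stack-allocated cpumask. */
static void pin_to_cpu(int cpu)
{
	int ret = set_cpus_allowed_ptr(current, cpumask_of(cpu));

	if (ret < 0)
		pr_err("could not bind to CPU %d (%d)\n", cpu, ret);
}
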
