Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug       1
-rw-r--r--   lib/Kconfig.kasan       8
-rw-r--r--   lib/cpumask.c          74
-rw-r--r--   lib/find_last_bit.c    41
-rw-r--r--   lib/mpi/longlong.h      4
-rw-r--r--   lib/percpu_counter.c    6
-rw-r--r--   lib/rhashtable.c       23
-rw-r--r--   lib/string.c            2
-rw-r--r--   lib/strnlen_user.c     12
-rw-r--r--   lib/swiotlb.c           5
10 files changed, 72 insertions(+), 104 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 17670573dda8..ba2b0c87e65b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1281,6 +1281,7 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY
 	int "How much to slow down RCU grace-period initialization"
 	range 0 5
 	default 3
+	depends on RCU_TORTURE_TEST_SLOW_INIT
 	help
 	  This option specifies the number of jiffies to wait between
 	  each rcu_node structure initialization.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fecaedc80a2..777eda7d1ab4 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -10,8 +10,11 @@ config KASAN
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
-	  This is strictly debugging feature. It consumes about 1/8
-	  of available memory and brings about ~x3 performance slowdown.
+	  This is strictly a debugging feature and it requires a gcc version
+	  of 4.9.2 or later. Detection of out of bounds accesses to stack or
+	  global variables requires gcc 5.0 or later.
+	  This feature consumes about 1/8 of available memory and brings about
+	  ~x3 performance slowdown.
 	  For better error detection enable CONFIG_STACKTRACE,
 	  and add slub_debug=U to boot cmdline.
 
@@ -40,6 +43,7 @@ config KASAN_INLINE
 	  memory accesses. This is faster than outline (in some workloads
 	  it gives about x2 boost over outline instrumentation), but
 	  make kernel's .text size much bigger.
+	  This requires a gcc version of 5.0 or later.
 
 endchoice
 
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 830dd5dec40f..5f627084f2e9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif
 
 /**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
  *
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
  *
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
  */
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	cpumask_var_t mask;
 	int cpu;
-	int ret = 0;
-
-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-		return -ENOMEM;
 
+	/* Wrap: we always want a cpu. */
 	i %= num_online_cpus();
 
-	if (numa_node == -1 || !cpumask_of_node(numa_node)) {
-		/* Use all online cpu's for non numa aware system */
-		cpumask_copy(mask, cpu_online_mask);
+	if (node == -1) {
+		for_each_cpu(cpu, cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
 	} else {
-		int n;
-
-		cpumask_and(mask,
-			    cpumask_of_node(numa_node), cpu_online_mask);
-
-		n = cpumask_weight(mask);
-		if (i >= n) {
-			i -= n;
-
-			/* If index > number of local cpu's, mask out local
-			 * cpu's
-			 */
-			cpumask_andnot(mask, cpu_online_mask, mask);
+		/* NUMA first. */
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+
+		for_each_cpu(cpu, cpu_online_mask) {
+			/* Skip NUMA nodes, done above. */
+			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+				continue;
+
+			if (i-- == 0)
+				return cpu;
 		}
 	}
-
-	for_each_cpu(cpu, mask) {
-		if (--i < 0)
-			goto out;
-	}
-
-	ret = -EAGAIN;
-
-out:
-	free_cpumask_var(mask);
-
-	if (!ret)
-		cpumask_set_cpu(cpu, dstp);
-
-	return ret;
+	BUG();
 }
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
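For context, a hedged sketch of the kind of caller the new helper serves: spreading per-queue IRQ affinity hints across CPUs, local NUMA node first. The example_ function and its irq/queue arguments are hypothetical, not part of this commit:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Hypothetical driver setup: one IRQ per RX queue, preferring CPUs
 * on the device's local NUMA node before spilling to remote ones.
 */
static void example_spread_irq_affinity(int *irqs, int nqueues, int node)
{
	int i;

	for (i = 0; i < nqueues; i++) {
		unsigned int cpu = cpumask_local_spread(i, node);

		irq_set_affinity_hint(irqs[i], get_cpu_mask(cpu));
	}
}

Note how the new API sidesteps the old one's allocation failure and -EAGAIN retry paths entirely: the caller always gets a CPU back.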
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
deleted file mode 100644
index 3e3be40c6a6e..000000000000
--- a/lib/find_last_bit.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/* find_last_bit.c: fallback find next bit implementation
- *
- * Copyright (C) 2008 IBM Corporation
- * Written by Rusty Russell <rusty@rustcorp.com.au>
- * (Inspired by David Howell's find_next_bit implementation)
- *
- * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
- * size and improve performance, 2015.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/bitmap.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-
-#ifndef find_last_bit
-
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
-{
-	if (size) {
-		unsigned long val = BITMAP_LAST_WORD_MASK(size);
-		unsigned long idx = (size-1) / BITS_PER_LONG;
-
-		do {
-			val &= addr[idx];
-			if (val)
-				return idx * BITS_PER_LONG + __fls(val);
-
-			val = ~0ul;
-		} while (idx--);
-	}
-	return size;
-}
-EXPORT_SYMBOL(find_last_bit);
-
-#endif
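The file is deleted here; find_last_bit itself remains exported, presumably reimplemented elsewhere in the same series. Its contract is worth restating for readers of the callers: the index of the most significant set bit, or size when no bit is set. A minimal hedged sketch, assuming a 64-bit unsigned long:

#include <linux/bitops.h>

static bool example_highest_bit(void)
{
	/* Two-word bitmap with only bit 69 set (word 1, bit 5). */
	unsigned long map[2] = { 0, 1UL << 5 };
	unsigned long last = find_last_bit(map, 128);

	if (last == 128)
		return false;	/* no bits were set */
	return last == 69;	/* holds on a 64-bit build */
}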
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index aac511417ad1..a89d041592c8 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -639,7 +639,7 @@ do { \
 **************  MIPS  *****************
 ***************************************/
 #if defined(__mips__) && W_TYPE_SIZE == 32
-#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v)			\
 do {						\
 	UDItype __ll = (UDItype)(u) * (v);	\
@@ -671,7 +671,7 @@ do { \
 **************  MIPS/64  **************
 ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v)					\
 do {								\
 	typedef unsigned int __ll_UTItype __attribute__((mode(TI)));	\
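The bug being fixed: the old test "__GNUC__ >= 4 && __GNUC_MINOR__ >= 4" misfires on gcc 5.0, whose minor version resets to 0, so a compiler newer than 4.4 fails the check. The patch special-cases __GNUC__ >= 5; an equivalent general pattern (the macro name here is illustrative, not from the tree) is:

/* Illustrative helper: true when the compiler is at least maj.min. */
#define GCC_AT_LEAST(maj, min) \
	(__GNUC__ > (maj) || (__GNUC__ == (maj) && __GNUC_MINOR__ >= (min)))

#if GCC_AT_LEAST(4, 4)
/* the DImode/TImode umul_ppmm variants are safe to use here */
#endif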
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 48144cdae819..f051d69f0910 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
  * Compare counter against given value.
  * Return 1 if greater, 0 if equal and -1 if less
  */
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 {
 	s64 count;
 
 	count = percpu_counter_read(fbc);
 	/* Check to see if rough count will be sufficient for comparison */
-	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+	if (abs(count - rhs) > (batch * num_online_cpus())) {
 		if (count > rhs)
 			return 1;
 		else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 	else
 		return 0;
 }
-EXPORT_SYMBOL(percpu_counter_compare);
+EXPORT_SYMBOL(__percpu_counter_compare);
 
 static int __init percpu_counter_startup(void)
 {
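With the batch size now a caller-supplied parameter, the old entry point can survive as a thin wrapper. The companion header change is not shown in this diff, but presumably resembles this sketch (percpu_counter_batch is the existing global default):

/* Sketch only: assumed change in include/linux/percpu_counter.h. */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

Callers that accumulate in large steps can pass a bigger batch to stay on the cheap approximate path longer before falling back to the exact sum.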
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 4898442b837f..8609378e6505 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -14,6 +14,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
@@ -25,6 +26,7 @@
 #include <linux/random.h>
 #include <linux/rhashtable.h>
 #include <linux/err.h>
+#include <linux/export.h>
 
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
@@ -405,13 +407,18 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
 
 	if (rht_grow_above_75(ht, tbl))
 		size *= 2;
-	/* More than two rehashes (not resizes) detected. */
-	else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
+	/* Do not schedule more than one rehash */
+	else if (old_tbl != tbl)
 		return -EBUSY;
 
 	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
-	if (new_tbl == NULL)
+	if (new_tbl == NULL) {
+		/* Schedule async resize/rehash to try allocation
+		 * non-atomic context.
+		 */
+		schedule_work(&ht->run_work);
 		return -ENOMEM;
+	}
 
 	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
 	if (err) {
@@ -441,6 +448,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 	if (key && rhashtable_lookup_fast(ht, key, ht->p))
 		goto exit;
 
+	err = -E2BIG;
+	if (unlikely(rht_grow_above_max(ht, tbl)))
+		goto exit;
+
 	err = -EAGAIN;
 	if (rhashtable_check_elasticity(ht, tbl, hash) ||
 	    rht_grow_above_100(ht, tbl))
@@ -733,6 +744,12 @@ int rhashtable_init(struct rhashtable *ht,
 	if (params->max_size)
 		ht->p.max_size = rounddown_pow_of_two(params->max_size);
 
+	if (params->insecure_max_entries)
+		ht->p.insecure_max_entries =
+			rounddown_pow_of_two(params->insecure_max_entries);
+	else
+		ht->p.insecure_max_entries = ht->p.max_size * 2;
+
 	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
 	/* The maximum (not average) chain length grows with the
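For users of the table, the new cap is opt-in via rhashtable_params. A hedged sketch of what a caller might pass — struct example_obj and the chosen sizes are hypothetical, only the parameter fields come from the API:

#include <linux/rhashtable.h>

struct example_obj {
	u32 key;
	struct rhash_head node;
};

/* Hypothetical parameters: cap the table at 64k entries, so inserts
 * beyond the cap fail with -E2BIG instead of growing without bound.
 */
static const struct rhashtable_params example_params = {
	.head_offset		= offsetof(struct example_obj, node),
	.key_offset		= offsetof(struct example_obj, key),
	.key_len		= sizeof(u32),
	.max_size		= 1 << 16,
	.insecure_max_entries	= 1 << 16,
};

Leaving .insecure_max_entries unset yields the default of twice max_size, per the rhashtable_init() hunk above.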
diff --git a/lib/string.c b/lib/string.c
index a5792019193c..bb3d4b6993c4 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
 void memzero_explicit(void *s, size_t count)
 {
 	memset(s, 0, count);
-	barrier();
+	barrier_data(s);
 }
 EXPORT_SYMBOL(memzero_explicit);
 
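The point of the change: a bare barrier() only constrains ordering, so a compiler doing dead-store elimination may still drop the memset when the buffer is about to go out of scope. barrier_data() feeds the pointer into the asm, forcing the zeroed bytes to count as used. The definition is not part of this diff, but presumably resembles:

/* Assumed shape of the gcc definition (see the compiler headers):
 * "r"(ptr) makes the buffer an input the compiler must materialize,
 * and the memory clobber keeps the preceding stores from being elided.
 */
#define barrier_data(ptr) \
	__asm__ __volatile__("" : : "r" (ptr) : "memory")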
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index a28df5206d95..fe9a32591c24 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 		return res + find_zero(data) + 1 - align;
 	}
 	res += sizeof(unsigned long);
-	if (unlikely(max < sizeof(unsigned long)))
+	/* We already handled 'unsigned long' bytes. Did we do it all ? */
+	if (unlikely(max <= sizeof(unsigned long)))
 		break;
 	max -= sizeof(unsigned long);
 	if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
@@ -89,8 +90,15 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
  * Get the size of a NUL-terminated string in user space.
  *
  * Returns the size of the string INCLUDING the terminating NUL.
- * If the string is too long, returns 'count+1'.
+ * If the string is too long, returns a number larger than @count. User
+ * has to check the return value against "> count".
 * On exception (or invalid count), returns 0.
+ *
+ * NOTE! You should basically never use this function. There is
+ * almost never any valid case for using the length of a user space
+ * string, since the string can be changed at any time by other
+ * threads. Use "strncpy_from_user()" instead to get a stable copy
+ * of the string.
 */
 long strnlen_user(const char __user *str, long count)
 {
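A hedged sketch of a caller written against the relaxed contract — checking "> count" rather than the exact value count + 1. The function and buffer size are hypothetical (and per the new NOTE, strncpy_from_user() is usually the better tool):

static long example_check_user_string(const char __user *ubuf)
{
	char kbuf[64];
	long len = strnlen_user(ubuf, sizeof(kbuf));

	if (len == 0)
		return -EFAULT;		/* faulted, or invalid count */
	if (len > sizeof(kbuf))
		return -EINVAL;		/* string (plus NUL) does not fit */
	return len;			/* includes the terminating NUL */
}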
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 4abda074ea45..3c365ab6cf5f 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -537,8 +537,9 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 
-phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-		       enum dma_data_direction dir)
+static phys_addr_t
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+	   enum dma_data_direction dir)
 {
 	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
