Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug     1
-rw-r--r--  lib/Kconfig.kasan     8
-rw-r--r--  lib/cpumask.c        74
-rw-r--r--  lib/find_last_bit.c  41
-rw-r--r--  lib/percpu_counter.c  6
-rw-r--r--  lib/rhashtable.c     11
-rw-r--r--  lib/string.c          2
-rw-r--r--  lib/strnlen_user.c   12
8 files changed, 58 insertions, 97 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 17670573dda8..ba2b0c87e65b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1281,6 +1281,7 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY
 	int "How much to slow down RCU grace-period initialization"
 	range 0 5
 	default 3
+	depends on RCU_TORTURE_TEST_SLOW_INIT
 	help
 	  This option specifies the number of jiffies to wait between
 	  each rcu_node structure initialization.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fecaedc80a2..777eda7d1ab4 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -10,8 +10,11 @@ config KASAN
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
-	  This is strictly debugging feature. It consumes about 1/8
-	  of available memory and brings about ~x3 performance slowdown.
+	  This is strictly a debugging feature and it requires a gcc version
+	  of 4.9.2 or later. Detection of out of bounds accesses to stack or
+	  global variables requires gcc 5.0 or later.
+	  This feature consumes about 1/8 of available memory and brings about
+	  ~x3 performance slowdown.
 	  For better error detection enable CONFIG_STACKTRACE,
 	  and add slub_debug=U to boot cmdline.
 
@@ -40,6 +43,7 @@ config KASAN_INLINE
 	  memory accesses. This is faster than outline (in some workloads
 	  it gives about x2 boost over outline instrumentation), but
 	  make kernel's .text size much bigger.
+	  This requires a gcc version of 5.0 or later.
 
 endchoice
 
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 830dd5dec40f..5f627084f2e9 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif
 
 /**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
  *
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
  *
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
  */
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	cpumask_var_t mask;
 	int cpu;
-	int ret = 0;
-
-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-		return -ENOMEM;
 
+	/* Wrap: we always want a cpu. */
 	i %= num_online_cpus();
 
-	if (numa_node == -1 || !cpumask_of_node(numa_node)) {
-		/* Use all online cpu's for non numa aware system */
-		cpumask_copy(mask, cpu_online_mask);
+	if (node == -1) {
+		for_each_cpu(cpu, cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
 	} else {
-		int n;
-
-		cpumask_and(mask,
-			    cpumask_of_node(numa_node), cpu_online_mask);
-
-		n = cpumask_weight(mask);
-		if (i >= n) {
-			i -= n;
-
-			/* If index > number of local cpu's, mask out local
-			 * cpu's
-			 */
-			cpumask_andnot(mask, cpu_online_mask, mask);
-		}
+		/* NUMA first. */
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+			if (i-- == 0)
+				return cpu;
+
+		for_each_cpu(cpu, cpu_online_mask) {
+			/* Skip NUMA nodes, done above. */
+			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+				continue;
+
+			if (i-- == 0)
+				return cpu;
+		}
 	}
-
-	for_each_cpu(cpu, mask) {
-		if (--i < 0)
-			goto out;
-	}
-
-	ret = -EAGAIN;
-
-out:
-	free_cpumask_var(mask);
-
-	if (!ret)
-		cpumask_set_cpu(cpu, dstp);
-
-	return ret;
+	BUG();
 }
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
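cpumask_local_spread() takes over the queue-spreading job the old helper did with a temporary cpumask: a caller now simply asks for the i'th online CPU, local NUMA node first. Below is a minimal sketch of how a multi-queue driver might use it for IRQ affinity hints; struct example_dev and its fields are hypothetical, while cpumask_local_spread(), cpumask_of() and irq_set_affinity_hint() are existing kernel APIs.

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Hypothetical per-device state, for illustration only. */
struct example_dev {
	int numa_node;
	unsigned int num_queues;
	unsigned int queue_irq[8];
};

static void example_set_queue_affinity(struct example_dev *dev)
{
	unsigned int q;

	for (q = 0; q < dev->num_queues; q++) {
		/* i'th online CPU, local NUMA node first; wraps past num_online_cpus(). */
		unsigned int cpu = cpumask_local_spread(q, dev->numa_node);

		irq_set_affinity_hint(dev->queue_irq[q], cpumask_of(cpu));
	}
}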
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
deleted file mode 100644
index 3e3be40c6a6e..000000000000
--- a/lib/find_last_bit.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/* find_last_bit.c: fallback find next bit implementation
- *
- * Copyright (C) 2008 IBM Corporation
- * Written by Rusty Russell <rusty@rustcorp.com.au>
- * (Inspired by David Howell's find_next_bit implementation)
- *
- * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
- * size and improve performance, 2015.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/bitmap.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-
-#ifndef find_last_bit
-
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
-{
-	if (size) {
-		unsigned long val = BITMAP_LAST_WORD_MASK(size);
-		unsigned long idx = (size-1) / BITS_PER_LONG;
-
-		do {
-			val &= addr[idx];
-			if (val)
-				return idx * BITS_PER_LONG + __fls(val);
-
-			val = ~0ul;
-		} while (idx--);
-	}
-	return size;
-}
-EXPORT_SYMBOL(find_last_bit);
-
-#endif
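The deleted fallback scanned the bitmap one word at a time from the top, masking the partial last word first; find_last_bit() itself remains available to callers (the generic implementation was presumably consolidated with the other find_*_bit helpers). Its return convention is easy to misread, so here is a small usage sketch (the example function is hypothetical): the index of the highest set bit, or the full bitmap size when no bit is set.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kernel.h>

static void example_find_last_bit(void)
{
	DECLARE_BITMAP(map, 64);
	unsigned long last;

	bitmap_zero(map, 64);
	set_bit(3, map);
	set_bit(42, map);

	last = find_last_bit(map, 64);	/* highest set bit -> 42 */
	pr_info("last set bit: %lu\n", last);

	bitmap_zero(map, 64);
	last = find_last_bit(map, 64);	/* no bit set -> returns size, i.e. 64 */
	pr_info("empty bitmap: %lu\n", last);
}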
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 48144cdae819..f051d69f0910 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
  * Compare counter against given value.
  * Return 1 if greater, 0 if equal and -1 if less
  */
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 {
 	s64 count;
 
 	count = percpu_counter_read(fbc);
 	/* Check to see if rough count will be sufficient for comparison */
-	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+	if (abs(count - rhs) > (batch * num_online_cpus())) {
 		if (count > rhs)
 			return 1;
 		else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 	else
 		return 0;
 }
-EXPORT_SYMBOL(percpu_counter_compare);
+EXPORT_SYMBOL(__percpu_counter_compare);
 
 static int __init percpu_counter_startup(void)
 {
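Only the .c side of the rename is shown here; presumably the old percpu_counter_compare() name survives as a thin header wrapper that passes the default batch, roughly like this sketch (not part of this diff):

/* Sketch of the assumed header-side wrapper keeping the old API. */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

Callers that need a tighter or looser error bound can then pass their own batch to __percpu_counter_compare() directly.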
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index b28df4019ade..4396434e4715 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -14,6 +14,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
@@ -446,6 +447,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 	if (key && rhashtable_lookup_fast(ht, key, ht->p))
 		goto exit;
 
+	err = -E2BIG;
+	if (unlikely(rht_grow_above_max(ht, tbl)))
+		goto exit;
+
 	err = -EAGAIN;
 	if (rhashtable_check_elasticity(ht, tbl, hash) ||
 	    rht_grow_above_100(ht, tbl))
@@ -738,6 +743,12 @@ int rhashtable_init(struct rhashtable *ht,
 	if (params->max_size)
 		ht->p.max_size = rounddown_pow_of_two(params->max_size);
 
+	if (params->insecure_max_entries)
+		ht->p.insecure_max_entries =
+			rounddown_pow_of_two(params->insecure_max_entries);
+	else
+		ht->p.insecure_max_entries = ht->p.max_size * 2;
+
 	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
 	/* The maximum (not average) chain length grows with the
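The slow insert path now fails with -E2BIG once the table holds more entries than insecure_max_entries allows. rht_grow_above_max() itself is not part of this diff; it presumably lives in the rhashtable header and compares the element count against that ceiling, roughly along these lines (a sketch under that assumption, which is also why <linux/atomic.h> is now included):

/* Sketch of the assumed header-side check behind the -E2BIG path. */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return ht->p.insecure_max_entries &&
	       atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}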
diff --git a/lib/string.c b/lib/string.c
index a5792019193c..bb3d4b6993c4 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
 void memzero_explicit(void *s, size_t count)
 {
 	memset(s, 0, count);
-	barrier();
+	barrier_data(s);
 }
 EXPORT_SYMBOL(memzero_explicit);
 
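barrier() only tells the compiler that memory may have changed, which is not always enough to stop it from eliding a memset() of a buffer that is about to go out of scope; barrier_data(s) additionally marks the pointed-to data as used, so the stores cannot be treated as dead. The gcc-side definition is not part of this diff, but is presumably a one-line asm along these lines (sketch):

/* Sketch of the compiler-side definition barrier_data() relies on (gcc). */
#define barrier_data(ptr) __asm__ __volatile__("" : : "r"(ptr) : "memory")

Typical callers are crypto routines using memzero_explicit() to wipe key material on the stack before returning.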
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index a28df5206d95..fe9a32591c24 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 			return res + find_zero(data) + 1 - align;
 		}
 		res += sizeof(unsigned long);
-		if (unlikely(max < sizeof(unsigned long)))
+		/* We already handled 'unsigned long' bytes. Did we do it all ? */
+		if (unlikely(max <= sizeof(unsigned long)))
 			break;
 		max -= sizeof(unsigned long);
 		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
@@ -89,8 +90,15 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
  * Get the size of a NUL-terminated string in user space.
  *
  * Returns the size of the string INCLUDING the terminating NUL.
- * If the string is too long, returns 'count+1'.
+ * If the string is too long, returns a number larger than @count. User
+ * has to check the return value against "> count".
  * On exception (or invalid count), returns 0.
+ *
+ * NOTE! You should basically never use this function. There is
+ * almost never any valid case for using the length of a user space
+ * string, since the string can be changed at any time by other
+ * threads. Use "strncpy_from_user()" instead to get a stable copy
+ * of the string.
  */
 long strnlen_user(const char __user *str, long count)
 {
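The new comment spells out the calling convention: any return value greater than count means "too long", not just count+1. A minimal sketch of a caller honouring that convention (the example function is hypothetical; strnlen_user() and the errno values are real):

#include <linux/uaccess.h>
#include <linux/errno.h>

static long example_user_string_len(const char __user *ustr, long max)
{
	long len = strnlen_user(ustr, max);

	if (len == 0)		/* faulted, or invalid count */
		return -EFAULT;
	if (len > max)		/* string (including NUL) longer than allowed */
		return -ENAMETOOLONG;

	return len;		/* length including the terminating NUL */
}

In most real code the safer pattern is strncpy_from_user() into a bounded buffer, as the NOTE above recommends, since another thread can change the user string between the length check and any later copy.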