Diffstat (limited to 'lib')
 lib/Kconfig.debug |  3 ++-
 lib/random32.c    | 39 ++++++++++++++++++---------------------
 lib/rhashtable.c  | 12 ++++++++----
 lib/test_bpf.c    | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 89 insertions(+), 28 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a28590083622..3ac43f34437b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1672,7 +1672,8 @@ config TEST_BPF
	  against the BPF interpreter or BPF JIT compiler depending on the
	  current setting. This is in particular useful for BPF JIT compiler
	  development, but also to run regression tests against changes in
-	  the interpreter code.
+	  the interpreter code. It also enables test stubs for eBPF maps and
+	  verifier used by user space verifier testsuite.
 
	  If unsure, say N.
 
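[Note: TEST_BPF is typically built as a module (CONFIG_TEST_BPF=m); loading the test_bpf module runs the whole suite against either the interpreter or the JIT, depending on the bpf_jit_enable sysctl, and reports per-test pass/fail in the kernel log.]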
diff --git a/lib/random32.c b/lib/random32.c
index c9b6bf3afe0c..0bee183fa18f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -37,6 +37,7 @@
 #include <linux/jiffies.h>
 #include <linux/random.h>
 #include <linux/sched.h>
+#include <asm/unaligned.h>
 
 #ifdef CONFIG_RANDOM32_SELFTEST
 static void __init prandom_state_selftest(void);
@@ -96,27 +97,23 @@ EXPORT_SYMBOL(prandom_u32);
  * This is used for pseudo-randomness with no outside seeding.
  * For more random results, use prandom_bytes().
  */
-void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes)
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
 {
-	unsigned char *p = buf;
-	int i;
+	u8 *ptr = buf;
 
-	for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
-		u32 random = prandom_u32_state(state);
-		int j;
-
-		for (j = 0; j < sizeof(u32); j++) {
-			p[i + j] = random;
-			random >>= BITS_PER_BYTE;
-		}
+	while (bytes >= sizeof(u32)) {
+		put_unaligned(prandom_u32_state(state), (u32 *) ptr);
+		ptr += sizeof(u32);
+		bytes -= sizeof(u32);
 	}
-	if (i < bytes) {
-		u32 random = prandom_u32_state(state);
 
-		for (; i < bytes; i++) {
-			p[i] = random;
-			random >>= BITS_PER_BYTE;
-		}
+	if (bytes > 0) {
+		u32 rem = prandom_u32_state(state);
+		do {
+			*ptr++ = (u8) rem;
+			bytes--;
+			rem >>= BITS_PER_BYTE;
+		} while (bytes > 0);
 	}
 }
 EXPORT_SYMBOL(prandom_bytes_state);
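[Note: the rewrite fills the buffer one 32-bit PRNG word at a time via an unaligned store, then spends one extra word on the 1-3 byte tail, instead of byte-peeling every word. A minimal user-space sketch of the same pattern, where memcpy stands in for the kernel's put_unaligned() and xorshift32 is a stand-in generator (both my assumptions, not taken from the patch):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	static uint32_t xorshift32(uint32_t *s)	/* stand-in for prandom_u32_state() */
	{
		*s ^= *s << 13; *s ^= *s >> 17; *s ^= *s << 5;
		return *s;
	}

	static void fill_bytes(uint32_t *state, void *buf, size_t bytes)
	{
		uint8_t *ptr = buf;

		/* Bulk phase: one PRNG call per four output bytes. */
		while (bytes >= sizeof(uint32_t)) {
			uint32_t r = xorshift32(state);

			memcpy(ptr, &r, sizeof(r));	/* plays the role of put_unaligned() */
			ptr += sizeof(uint32_t);
			bytes -= sizeof(uint32_t);
		}

		/* Tail phase: one more PRNG word covers the last 1-3 bytes. */
		if (bytes > 0) {
			uint32_t rem = xorshift32(state);

			do {
				*ptr++ = (uint8_t)rem;
				bytes--;
				rem >>= 8;
			} while (bytes > 0);
		}
	}
]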
@@ -126,7 +123,7 @@ EXPORT_SYMBOL(prandom_bytes_state);
  * @buf: where to copy the pseudo-random bytes to
  * @bytes: the requested number of bytes
  */
-void prandom_bytes(void *buf, int bytes)
+void prandom_bytes(void *buf, size_t bytes)
 {
 	struct rnd_state *state = &get_cpu_var(net_rand_state);
 
@@ -137,7 +134,7 @@ EXPORT_SYMBOL(prandom_bytes);
 
 static void prandom_warmup(struct rnd_state *state)
 {
-	/* Calling RNG ten times to satify recurrence condition */
+	/* Calling RNG ten times to satisfy recurrence condition */
 	prandom_u32_state(state);
 	prandom_u32_state(state);
 	prandom_u32_state(state);
@@ -152,7 +149,7 @@ static void prandom_warmup(struct rnd_state *state)
 
 static u32 __extract_hwseed(void)
 {
-	u32 val = 0;
+	unsigned int val = 0;
 
 	(void)(arch_get_random_seed_int(&val) ||
 	       arch_get_random_int(&val));
@@ -228,7 +225,7 @@ static void __prandom_timer(unsigned long dontcare)
 	prandom_seed(entropy);
 
 	/* reseed every ~60 seconds, in [40 .. 80) interval with slack */
-	expires = 40 + (prandom_u32() % 40);
+	expires = 40 + prandom_u32_max(40);
 	seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
 
 	add_timer(&seed_timer);
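[Note: prandom_u32_max(range) maps a full 32-bit random word into [0, range) with a multiply-and-shift rather than a modulus, avoiding the divide. A user-space sketch of that reduction, based on the helper's definition in include/linux/random.h of this era (the name below is mine):

	#include <stdint.h>

	/* Scale rnd from [0, 2^32) down to [0, range) without a division:
	 * the high 32 bits of the 64-bit product select the bucket. For
	 * the reseed timer above, range is 40, so expires = 40 + result
	 * stays in [40, 80) as the comment demands. */
	static inline uint32_t u32_below(uint32_t rnd, uint32_t range)
	{
		return (uint32_t)(((uint64_t)rnd * range) >> 32);
	}
]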
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 16d02639d334..3943e14da628 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -297,7 +297,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
 
 	ASSERT_RHT_MUTEX(ht);
 
-	if (tbl->size <= HASH_MIN_SIZE)
+	if (ht->shift <= ht->p.min_shift)
 		return 0;
 
 	ntbl = bucket_table_alloc(tbl->size / 2, flags);
@@ -505,9 +505,10 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
 }
 EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
 
-static size_t rounded_hashtable_size(unsigned int nelem)
+static size_t rounded_hashtable_size(struct rhashtable_params *params)
 {
-	return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
+	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+		   1UL << params->min_shift);
 }
 
 /**
@@ -565,8 +566,11 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	    (!params->key_len && !params->obj_hashfn))
 		return -EINVAL;
 
+	params->min_shift = max_t(size_t, params->min_shift,
+				  ilog2(HASH_MIN_SIZE));
+
 	if (params->nelem_hint)
-		size = rounded_hashtable_size(params->nelem_hint);
+		size = rounded_hashtable_size(params);
 
 	tbl = bucket_table_alloc(size, GFP_KERNEL);
 	if (tbl == NULL)
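[Note: taken together, the rhashtable hunks replace the hard HASH_MIN_SIZE floor with a per-table params->min_shift, clamped in rhashtable_init() so it can never sink below ilog2(HASH_MIN_SIZE); rhashtable_shrink() then compares shifts instead of table sizes. A condensed user-space sketch of the resulting sizing logic; MIN_SHIFT_FLOOR and all helper names are stand-ins, not kernel identifiers:

	#include <stddef.h>

	#define MIN_SHIFT_FLOOR 2	/* stand-in for ilog2(HASH_MIN_SIZE) */

	struct params_sketch {
		size_t nelem_hint;	/* expected number of elements */
		size_t min_shift;	/* log2 of smallest allowed bucket count */
	};

	static size_t roundup_pow2(size_t n)	/* stand-in for roundup_pow_of_two() */
	{
		size_t p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	/* Mirrors the clamp added to rhashtable_init(). */
	static void clamp_min_shift(struct params_sketch *p)
	{
		if (p->min_shift < MIN_SHIFT_FLOOR)
			p->min_shift = MIN_SHIFT_FLOOR;
	}

	/* Mirrors rounded_hashtable_size(): size for a ~75% load factor,
	 * but never below the configured minimum table size. */
	static size_t initial_size(const struct params_sketch *p)
	{
		size_t want = roundup_pow2(p->nelem_hint * 4 / 3);
		size_t floor = (size_t)1 << p->min_shift;

		return want > floor ? want : floor;
	}
]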
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 89e0345733bd..23e070bcf72d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1342,6 +1342,44 @@ static struct bpf_test tests[] = {
 		{ { 0, -1 } }
 	},
 	{
+		"INT: shifts by register",
+		.u.insns_int = {
+			BPF_MOV64_IMM(R0, -1234),
+			BPF_MOV64_IMM(R1, 1),
+			BPF_ALU32_REG(BPF_RSH, R0, R1),
+			BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R2, 1),
+			BPF_ALU64_REG(BPF_LSH, R0, R2),
+			BPF_MOV32_IMM(R4, -1234),
+			BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_AND, R4, 63),
+			BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */
+			BPF_MOV64_IMM(R3, 47),
+			BPF_ALU64_REG(BPF_ARSH, R0, R3),
+			BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R2, 1),
+			BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
+			BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R4, 4),
+			BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
+			BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R4, 5),
+			BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
+			BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
+			BPF_EXIT_INSN(),
+			BPF_MOV64_IMM(R0, -1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, -1 } }
+	},
+	{
 		"INT: DIV + ABS",
 		.u.insns_int = {
 			BPF_ALU64_REG(BPF_MOV, R6, R1),
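[Note: a worked check of the constants in the new shift test, under the assumption that BPF ALU32 ops act on the low 32 bits with zero extension and that BPF_ARSH propagates the sign bit:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t r0 = (uint32_t)-1234;	/* 0xfffffb2e */

		/* ALU32 BPF_RSH is a logical shift on the low 32 bits. */
		assert((r0 >> 1) == 0x7ffffd97u);

		/* The AND picks the shift amount: 0xfffffb2e & 63 == 46. */
		assert(((uint32_t)-1234 & 63) == 46);

		/* LSH by 46 then ARSH by 47 halves the value, keeping the
		 * sign: -1234 / 2 == -617. Assumes >> on a signed value is
		 * arithmetic, as on gcc/clang; BPF_ARSH guarantees it. */
		assert(((int64_t)((uint64_t)(int64_t)-1234 << 46) >> 47) == -617);
		return 0;
	}
]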
@@ -1697,6 +1735,27 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 1, 0 } },
 	},
+	{
+		"load 64-bit immediate",
+		.u.insns_int = {
+			BPF_LD_IMM64(R1, 0x567800001234LL),
+			BPF_MOV64_REG(R2, R1),
+			BPF_MOV64_REG(R3, R2),
+			BPF_ALU64_IMM(BPF_RSH, R2, 32),
+			BPF_ALU64_IMM(BPF_LSH, R3, 32),
+			BPF_ALU64_IMM(BPF_RSH, R3, 32),
+			BPF_ALU64_IMM(BPF_MOV, R0, 0),
+			BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_IMM(BPF_MOV, R0, 1),
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } }
+	},
 };
 
 static struct net_device dev;
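[Note: BPF_LD_IMM64 is the only double-size eBPF instruction; it occupies two struct bpf_insn slots, carrying the low 32 bits of the constant in the first slot's imm field and the high 32 bits in the second's (the macro lives in include/linux/filter.h). The test's expectations follow directly:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t imm = 0x567800001234ULL;	/* the test's constant */

		/* R2: 64-bit RSH by 32 leaves the high word. */
		assert((imm >> 32) == 0x5678);
		/* R3: LSH then RSH by 32 isolates the low word. */
		assert(((imm << 32) >> 32) == 0x1234);
		return 0;
	}
]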
@@ -1798,7 +1857,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
 		break;
 
 	case INTERNAL:
-		fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
+		fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
 		if (fp == NULL) {
 			pr_cont("UNEXPECTED_FAIL no memory left\n");
 			*err = -ENOMEM;
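[Note: switching from plain kzalloc() to bpf_prog_alloc() routes the test images through the dedicated eBPF program allocator, presumably so they get the same page-backed treatment as real programs (which among other things allows images to be made read-only); the second argument is extra GFP flags, none needed here.]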
@@ -1835,7 +1894,7 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
 		    int runs, u64 *duration)
 {
 	u64 start, finish;
-	int ret, i;
+	int ret = 0, i;
 
 	start = ktime_to_us(ktime_get());
 
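[Note: the ret = 0 initialization looks like a robustness/warning fix: if runs were ever zero, the benchmarking loop in __run_one() that assigns ret would never execute and the old code would have returned an uninitialized value.]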