author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 12:38:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 12:38:14 -0400
commit     ae045e2455429c418a418a3376301a9e5753a0a8 (patch)
tree       b445bdeecd3f38aa0d0a29c9585cee49e4ccb0f1 /lib
parent     f4f142ed4ef835709c7e6d12eaca10d190bcebed (diff)
parent     d247b6ab3ce6dd43665780865ec5fa145d9ab6bd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

  1) Steady transitioning of the BPF infrastructure to a generic spot so all kernel subsystems can make use of it, from Alexei Starovoitov.

  2) SFC driver supports busy polling, from Alexandre Rames.

  3) Take advantage of hash table in UDP multicast delivery, from David Held.

  4) Lighten locking, in particular by getting rid of the LRU lists, in inet frag handling. From Florian Westphal.

  5) Add support for various RFC6458 control messages in SCTP, from Geir Ola Vaagland.

  6) Allow to filter bridge forwarding database dumps by device, from Jamal Hadi Salim.

  7) virtio-net also now supports busy polling, from Jason Wang.

  8) Some low level optimization tweaks in pktgen from Jesper Dangaard Brouer.

  9) Add support for ipv6 address generation modes, so that userland can have some input into the process. From Jiri Pirko.

 10) Consolidate common TCP connection request code in ipv4 and ipv6, from Octavian Purdila.

 11) New ARP packet logger in netfilter, from Pablo Neira Ayuso.

 12) Generic resizable RCU hash table, with initial users in netlink and nftables. From Thomas Graf.

 13) Maintain a name assignment type so that userspace can see where a network device name came from (enumerated by kernel, assigned explicitly by userspace, etc.) From Tom Gundersen.

 14) Automatic flow label generation on transmit in ipv6, from Tom Herbert.

 15) New packet timestamping facilities from Willem de Bruijn, meant to assist in measuring latencies going into/out-of the packet scheduler, latency from TCP data transmission to ACK, etc"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1536 commits)
  cxgb4 : Disable recursive mailbox commands when enabling vi
  net: reduce USB network driver config options.
  tg3: Modify tg3_tso_bug() to handle multiple TX rings
  amd-xgbe: Perform phy connect/disconnect at dev open/stop
  amd-xgbe: Use dma_set_mask_and_coherent to set DMA mask
  net: sun4i-emac: fix memory leak on bad packet
  sctp: fix possible seqlock seadlock in sctp_packet_transmit()
  Revert "net: phy: Set the driver when registering an MDIO bus device"
  cxgb4vf: Turn off SGE RX/TX Callback Timers and interrupts in PCI shutdown routine
  team: Simplify return path of team_newlink
  bridge: Update outdated comment on promiscuous mode
  net-timestamp: ACK timestamp for bytestreams
  net-timestamp: TCP timestamping
  net-timestamp: SCHED timestamp on entering packet scheduler
  net-timestamp: add key to disambiguate concurrent datagrams
  net-timestamp: move timestamp flags out of sk_flags
  net-timestamp: extend SCM_TIMESTAMPING ancillary data struct
  cxgb4i : Move stray CPL definitions to cxgb4 driver
  tcp: reduce spurious retransmits due to transient SACK reneging
  qlcnic: Initialize dcbnl_ops before register_netdev
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      8
-rw-r--r--  lib/Makefile           2
-rw-r--r--  lib/crc32.c          153
-rw-r--r--  lib/dynamic_debug.c    8
-rw-r--r--  lib/iovec.c            4
-rw-r--r--  lib/net_utils.c       10
-rw-r--r--  lib/random32.c        49
-rw-r--r--  lib/rhashtable.c     797
-rw-r--r--  lib/test_bpf.c        28
9 files changed, 935 insertions(+), 124 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1f630ad31fc2..cfe7df8f62cc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1536,6 +1536,14 @@ config TEST_STRING_HELPERS
 config TEST_KSTRTOX
 	tristate "Test kstrto*() family of functions at runtime"
 
+config TEST_RHASHTABLE
+	bool "Perform selftest on resizable hash table"
+	default n
+	help
+	  Enable this option to test the rhashtable functions at boot.
+
+	  If unsure, say N.
+
 endmenu # runtime tests
 
 config PROVIDE_OHCI1394_DMA_INIT
diff --git a/lib/Makefile b/lib/Makefile
index 230b4b1456d6..8427df95dade 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
 	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o hash.o
+	 percpu-refcount.o percpu_ida.o hash.o rhashtable.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
diff --git a/lib/crc32.c b/lib/crc32.c
index 21a7b2135af6..9a907d489d95 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -50,34 +50,10 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
 MODULE_DESCRIPTION("Various CRC32 calculations");
 MODULE_LICENSE("GPL");
 
-#define GF2_DIM 32
-
-static u32 gf2_matrix_times(u32 *mat, u32 vec)
-{
-	u32 sum = 0;
-
-	while (vec) {
-		if (vec & 1)
-			sum ^= *mat;
-		vec >>= 1;
-		mat++;
-	}
-
-	return sum;
-}
-
-static void gf2_matrix_square(u32 *square, u32 *mat)
-{
-	int i;
-
-	for (i = 0; i < GF2_DIM; i++)
-		square[i] = gf2_matrix_times(mat, mat[i]);
-}
-
 #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
 
 /* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32
+static inline u32 __pure
 crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 {
 # ifdef __LITTLE_ENDIAN
@@ -155,51 +131,6 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 }
 #endif
 
-/* For conditions of distribution and use, see copyright notice in zlib.h */
-static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2,
-				 u32 polynomial)
-{
-	u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */
-	u32 odd[GF2_DIM];  /* Odd-power-of-two zeros operator */
-	u32 row;
-	int i;
-
-	if (len2 <= 0)
-		return crc1;
-
-	/* Put operator for one zero bit in odd */
-	odd[0] = polynomial;
-	row = 1;
-	for (i = 1; i < GF2_DIM; i++) {
-		odd[i] = row;
-		row <<= 1;
-	}
-
-	gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */
-	gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */
-
-	/* Apply len2 zeros to crc1 (first square will put the operator for one
-	 * zero byte, eight zero bits, in even).
-	 */
-	do {
-		/* Apply zeros operator for this bit of len2 */
-		gf2_matrix_square(even, odd);
-		if (len2 & 1)
-			crc1 = gf2_matrix_times(even, crc1);
-		len2 >>= 1;
-		/* If no more bits set, then done */
-		if (len2 == 0)
-			break;
-		/* Another iteration of the loop with odd and even swapped */
-		gf2_matrix_square(odd, even);
-		if (len2 & 1)
-			crc1 = gf2_matrix_times(odd, crc1);
-		len2 >>= 1;
-	} while (len2 != 0);
-
-	crc1 ^= crc2;
-	return crc1;
-}
 
 /**
  * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
@@ -271,19 +202,81 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
 		(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
 }
 #endif
-u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
-{
-	return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE);
-}
-
-u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
-{
-	return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE);
-}
-EXPORT_SYMBOL(crc32_le);
-EXPORT_SYMBOL(crc32_le_combine);
-EXPORT_SYMBOL(__crc32c_le);
-EXPORT_SYMBOL(__crc32c_le_combine);
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(__crc32c_le);
+
+/*
+ * This multiplies the polynomials x and y modulo the given modulus.
+ * This follows the "little-endian" CRC convention that the lsbit
+ * represents the highest power of x, and the msbit represents x^0.
+ */
+static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
+{
+	u32 product = x & 1 ? y : 0;
+	int i;
+
+	for (i = 0; i < 31; i++) {
+		product = (product >> 1) ^ (product & 1 ? modulus : 0);
+		x >>= 1;
+		product ^= x & 1 ? y : 0;
+	}
+
+	return product;
+}
+
+/**
+ * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time
+ * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
+ * @len: The number of bytes. @crc is multiplied by x^(8*@len)
+ * @polynomial: The modulus used to reduce the result to 32 bits.
+ *
+ * It's possible to parallelize CRC computations by computing a CRC
+ * over separate ranges of a buffer, then summing them.
+ * This shifts the given CRC by 8*len bits (i.e. produces the same effect
+ * as appending len bytes of zero to the data), in time proportional
+ * to log(len).
+ */
+static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
+						   u32 polynomial)
+{
+	u32 power = polynomial;	/* CRC of x^32 */
+	int i;
+
+	/* Shift up to 32 bits in the simple linear way */
+	for (i = 0; i < 8 * (int)(len & 3); i++)
+		crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
+
+	len >>= 2;
+	if (!len)
+		return crc;
+
+	for (;;) {
+		/* "power" is x^(2^i), modulo the polynomial */
+		if (len & 1)
+			crc = gf2_multiply(crc, power, polynomial);
+
+		len >>= 1;
+		if (!len)
+			break;
+
+		/* Square power, advancing to x^(2^(i+1)) */
+		power = gf2_multiply(power, power, polynomial);
+	}
+
+	return crc;
+}
+
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+{
+	return crc32_generic_shift(crc, len, CRCPOLY_LE);
+}
+
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+{
+	return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
+}
+EXPORT_SYMBOL(crc32_le_shift);
+EXPORT_SYMBOL(__crc32c_le_shift);
 
 /**
  * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -351,7 +344,7 @@ EXPORT_SYMBOL(crc32_be);
 #ifdef CONFIG_CRC32_SELFTEST
 
 /* 4096 random bytes */
-static u8 __attribute__((__aligned__(8))) test_buf[] =
+static u8 const __aligned(8) test_buf[] __initconst =
 {
 	0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
 	0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
@@ -875,7 +868,7 @@ static struct crc_test {
 	u32 crc_le;	/* expected crc32_le result */
 	u32 crc_be;	/* expected crc32_be result */
 	u32 crc32c_le;	/* expected crc32c_le result */
-} test[] =
+} const test[] __initconst =
 {
 	{0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
 	{0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
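
The crc32_le_combine()/crc32_generic_combine() pair removed above is superseded by crc32_le_shift(): shifting a CRC by 8*len bits is enough to combine independently computed CRCs, since crc(A||B) == crc32_le_shift(crc(A), len(B)) ^ crc(B) when crc(B) is computed with a zero seed. The following is a minimal userspace sketch of that identity; the helper names and the naive O(len) shift are local to the example, not kernel API (the kernel helper does the same thing in log(len) steps).

/* build: cc -O2 -o crc_shift_demo crc_shift_demo.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRCPOLY_LE 0xedb88320u		/* same polynomial crc32_le() uses */

/* Bit-at-a-time little-endian CRC32: seed in, raw CRC out, no inversions. */
static uint32_t crc32_le_bitwise(uint32_t crc, const unsigned char *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (crc & 1 ? CRCPOLY_LE : 0);
	}
	return crc;
}

/* Naive O(len) equivalent of crc32_generic_shift(): feed len zero bytes. */
static uint32_t crc32_le_shift_naive(uint32_t crc, size_t len)
{
	for (size_t i = 0; i < 8 * len; i++)
		crc = (crc >> 1) ^ (crc & 1 ? CRCPOLY_LE : 0);
	return crc;
}

int main(void)
{
	const unsigned char a[] = "hello ";
	const unsigned char b[] = "world";
	size_t alen = strlen((const char *)a), blen = strlen((const char *)b);

	uint32_t crc_a = crc32_le_bitwise(~0u, a, alen);	/* CRC of the first chunk */
	uint32_t crc_b = crc32_le_bitwise(0, b, blen);		/* second chunk, seed 0 */
	uint32_t whole = crc32_le_bitwise(~0u, (const unsigned char *)"hello world",
					  alen + blen);

	/* crc(A||B) == shift(crc(A), len(B)) ^ crc(B) */
	uint32_t combined = crc32_le_shift_naive(crc_a, blen) ^ crc_b;

	printf("whole=%08x combined=%08x %s\n", whole, combined,
	       whole == combined ? "OK" : "MISMATCH");
	return 0;
}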
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7288e38e1757..c9afbe2c445a 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -614,13 +614,15 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
 		char buf[PREFIX_SIZE];
 
 		res = dev_printk_emit(7, dev->dev.parent,
-				      "%s%s %s %s: %pV",
+				      "%s%s %s %s%s: %pV",
 				      dynamic_emit_prefix(descriptor, buf),
 				      dev_driver_string(dev->dev.parent),
 				      dev_name(dev->dev.parent),
-				      netdev_name(dev), &vaf);
+				      netdev_name(dev), netdev_reg_state(dev),
+				      &vaf);
 	} else if (dev) {
-		res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+		res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+			     netdev_reg_state(dev), &vaf);
 	} else {
 		res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
 	}
diff --git a/lib/iovec.c b/lib/iovec.c
index 7a7c2da4cddf..df3abd1eaa4a 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -85,6 +85,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
 int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
 			int offset, int len)
 {
+	/* No data? Done! */
+	if (len == 0)
+		return 0;
+
 	/* Skip over the finished iovecs */
 	while (offset >= iov->iov_len) {
 		offset -= iov->iov_len;
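
The new early return matters because the skip loop advances the iovec pointer unconditionally: if len is 0 and offset equals the total length described by the iovec array, the loop would walk past the last element before discovering there is nothing to copy. A rough userspace sketch of the function's skip-then-copy flow under that assumption (not the kernel implementation itself):

#include <string.h>
#include <sys/uio.h>

static int fromiovecend_sketch(unsigned char *kdata, const struct iovec *iov,
			       int offset, int len)
{
	if (len == 0)				/* the guard this hunk adds */
		return 0;

	while (offset >= (int)iov->iov_len) {	/* skip fully-consumed iovecs */
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {			/* copy out of each remaining iovec */
		unsigned char *base = (unsigned char *)iov->iov_base + offset;
		int copy = (int)iov->iov_len - offset;

		if (copy > len)
			copy = len;
		memcpy(kdata, base, copy);
		kdata += copy;
		len -= copy;
		offset = 0;
		iov++;
	}
	return 0;
}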
diff --git a/lib/net_utils.c b/lib/net_utils.c
index 2e3c52c8d050..148fc6e99ef6 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -3,24 +3,24 @@
 #include <linux/ctype.h>
 #include <linux/kernel.h>
 
-int mac_pton(const char *s, u8 *mac)
+bool mac_pton(const char *s, u8 *mac)
 {
 	int i;
 
 	/* XX:XX:XX:XX:XX:XX */
 	if (strlen(s) < 3 * ETH_ALEN - 1)
-		return 0;
+		return false;
 
 	/* Don't dirty result unless string is valid MAC. */
 	for (i = 0; i < ETH_ALEN; i++) {
 		if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
-			return 0;
+			return false;
 		if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
-			return 0;
+			return false;
 	}
 	for (i = 0; i < ETH_ALEN; i++) {
 		mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
 	}
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(mac_pton);
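
mac_pton() keeps its strict XX:XX:XX:XX:XX:XX format and only writes to the output buffer on success; the change above is purely an int-to-bool conversion of the return type. A hedged kernel-style sketch of a caller (the helper name and surrounding module are hypothetical):

#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/printk.h>

static u8 example_addr[ETH_ALEN];

/* Hypothetical parameter parser: reject anything mac_pton() cannot parse. */
static int example_set_mac(const char *val)
{
	if (!mac_pton(val, example_addr))	/* bool now: false on malformed input */
		return -EINVAL;

	pr_info("parsed MAC %pM\n", example_addr);
	return 0;
}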
diff --git a/lib/random32.c b/lib/random32.c
index fa5da61ce7ad..c9b6bf3afe0c 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -40,6 +40,10 @@
 
 #ifdef CONFIG_RANDOM32_SELFTEST
 static void __init prandom_state_selftest(void);
+#else
+static inline void prandom_state_selftest(void)
+{
+}
 #endif
 
 static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
@@ -53,8 +57,7 @@ static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
  */
 u32 prandom_u32_state(struct rnd_state *state)
 {
-#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
-
+#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
 	state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U);
 	state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U);
 	state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U);
@@ -147,21 +150,25 @@ static void prandom_warmup(struct rnd_state *state)
 	prandom_u32_state(state);
 }
 
-static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
-{
-	/* Note: This sort of seeding is ONLY used in test cases and
-	 * during boot at the time from core_initcall until late_initcall
-	 * as we don't have a stronger entropy source available yet.
-	 * After late_initcall, we reseed entire state, we have to (!),
-	 * otherwise an attacker just needs to search 32 bit space to
-	 * probe for our internal 128 bit state if he knows a couple
-	 * of prandom32 outputs!
-	 */
-#define LCG(x)	((x) * 69069U)	/* super-duper LCG */
-	state->s1 = __seed(LCG(seed), 2U);
-	state->s2 = __seed(LCG(state->s1), 8U);
-	state->s3 = __seed(LCG(state->s2), 16U);
-	state->s4 = __seed(LCG(state->s3), 128U);
+static u32 __extract_hwseed(void)
+{
+	u32 val = 0;
+
+	(void)(arch_get_random_seed_int(&val) ||
+	       arch_get_random_int(&val));
+
+	return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+			       bool mix_with_hwseed)
+{
+#define LCG(x)	 ((x) * 69069U)	/* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+	state->s1 = __seed(HWSEED() ^ LCG(seed),      2U);
+	state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
+	state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
+	state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
 }
 
 /**
@@ -194,14 +201,13 @@ static int __init prandom_init(void)
 {
 	int i;
 
-#ifdef CONFIG_RANDOM32_SELFTEST
 	prandom_state_selftest();
-#endif
 
 	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state,i);
+		u32 weak_seed = (i + jiffies) ^ random_get_entropy();
 
-		prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
+		prandom_seed_early(state, weak_seed, true);
 		prandom_warmup(state);
 	}
 
@@ -210,6 +216,7 @@ static int __init prandom_init(void)
 core_initcall(prandom_init);
 
 static void __prandom_timer(unsigned long dontcare);
+
 static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
 
 static void __prandom_timer(unsigned long dontcare)
@@ -419,7 +426,7 @@ static void __init prandom_state_selftest(void)
 	for (i = 0; i < ARRAY_SIZE(test1); i++) {
 		struct rnd_state state;
 
-		prandom_seed_very_weak(&state, test1[i].seed);
+		prandom_seed_early(&state, test1[i].seed, false);
 		prandom_warmup(&state);
 
 		if (test1[i].result != prandom_u32_state(&state))
@@ -434,7 +441,7 @@ static void __init prandom_state_selftest(void)
 	for (i = 0; i < ARRAY_SIZE(test2); i++) {
 		struct rnd_state state;
 
-		prandom_seed_very_weak(&state, test2[i].seed);
+		prandom_seed_early(&state, test2[i].seed, false);
 		prandom_warmup(&state);
 
 		for (j = 0; j < test2[i].iteration - 1; j++)
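
prandom_seed_early() still derives the whole 128-bit Tausworthe state from a single 32-bit value through an LCG, which is why it is only used before late_initcall and why the hunk above mixes in arch_get_random_seed_int()/arch_get_random_int() when the CPU provides them. A userspace sketch of the LCG expansion, assuming __seed() simply lifts values below each word's minimum (s1 >= 2, s2 >= 8, s3 >= 16, s4 >= 128):

#include <stdint.h>
#include <stdio.h>

struct rnd_state_sketch { uint32_t s1, s2, s3, s4; };

/* Assumed behaviour of the kernel's __seed(): keep each state word above its
 * per-word minimum so the Tausworthe recurrence never collapses to zero. */
static uint32_t seed_min(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;
}

/* prandom_seed_early() without the hardware-seed mixing: one 32-bit seed is
 * expanded into the 128-bit state via a multiplicative LCG. */
static void seed_early_sketch(struct rnd_state_sketch *st, uint32_t seed)
{
#define LCG(x) ((x) * 69069u)		/* super-duper LCG, as in the hunk above */
	st->s1 = seed_min(LCG(seed),   2u);
	st->s2 = seed_min(LCG(st->s1), 8u);
	st->s3 = seed_min(LCG(st->s2), 16u);
	st->s4 = seed_min(LCG(st->s3), 128u);
}

int main(void)
{
	struct rnd_state_sketch st;

	seed_early_sketch(&st, 0x12345678u);
	printf("s1=%08x s2=%08x s3=%08x s4=%08x\n", st.s1, st.s2, st.s3, st.s4);
	return 0;
}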
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
new file mode 100644
index 000000000000..e6940cf16628
--- /dev/null
+++ b/lib/rhashtable.c
@@ -0,0 +1,797 @@
1/*
2 * Resizable, Scalable, Concurrent Hash Table
3 *
4 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
5 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6 *
7 * Based on the following paper:
8 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9 *
10 * Code partially derived from nft_hash
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/log2.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/mm.h>
23#include <linux/hash.h>
24#include <linux/random.h>
25#include <linux/rhashtable.h>
26#include <linux/log2.h>
27
28#define HASH_DEFAULT_SIZE 64UL
29#define HASH_MIN_SIZE 4UL
30
31#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
32
33#ifdef CONFIG_PROVE_LOCKING
34int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
35{
36 return ht->p.mutex_is_held();
37}
38EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
39#endif
40
41/**
42 * rht_obj - cast hash head to outer object
43 * @ht: hash table
44 * @he: hashed node
45 */
46void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
47{
48 return (void *) he - ht->p.head_offset;
49}
50EXPORT_SYMBOL_GPL(rht_obj);
51
52static u32 __hashfn(const struct rhashtable *ht, const void *key,
53 u32 len, u32 hsize)
54{
55 u32 h;
56
57 h = ht->p.hashfn(key, len, ht->p.hash_rnd);
58
59 return h & (hsize - 1);
60}
61
62/**
63 * rhashtable_hashfn - compute hash for key of given length
 64 * @ht: hash table to compute the hash for
65 * @key: pointer to key
66 * @len: length of key
67 *
68 * Computes the hash value using the hash function provided in the 'hashfn'
69 * of struct rhashtable_params. The returned value is guaranteed to be
70 * smaller than the number of buckets in the hash table.
71 */
72u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
73{
74 struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
75
76 return __hashfn(ht, key, len, tbl->size);
77}
78EXPORT_SYMBOL_GPL(rhashtable_hashfn);
79
80static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
81{
82 if (unlikely(!ht->p.key_len)) {
83 u32 h;
84
85 h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
86
87 return h & (hsize - 1);
88 }
89
90 return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
91}
92
93/**
94 * rhashtable_obj_hashfn - compute hash for hashed object
 95 * @ht: hash table to compute the hash for
96 * @ptr: pointer to hashed object
97 *
 98 * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
 99 * on whether the hash table is set up to work with
100 * a fixed length key. The returned value is guaranteed to be smaller than
101 * the number of buckets in the hash table.
102 */
103u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
104{
105 struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
106
107 return obj_hashfn(ht, ptr, tbl->size);
108}
109EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
110
111static u32 head_hashfn(const struct rhashtable *ht,
112 const struct rhash_head *he, u32 hsize)
113{
114 return obj_hashfn(ht, rht_obj(ht, he), hsize);
115}
116
117static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
118{
119 struct bucket_table *tbl;
120 size_t size;
121
122 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
123 tbl = kzalloc(size, flags);
124 if (tbl == NULL)
125 tbl = vzalloc(size);
126
127 if (tbl == NULL)
128 return NULL;
129
130 tbl->size = nbuckets;
131
132 return tbl;
133}
134
135static void bucket_table_free(const struct bucket_table *tbl)
136{
137 kvfree(tbl);
138}
139
140/**
141 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
142 * @ht: hash table
143 * @new_size: new table size
144 */
145bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
146{
147 /* Expand table when exceeding 75% load */
148 return ht->nelems > (new_size / 4 * 3);
149}
150EXPORT_SYMBOL_GPL(rht_grow_above_75);
151
152/**
153 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
154 * @ht: hash table
155 * @new_size: new table size
156 */
157bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
158{
159 /* Shrink table beneath 30% load */
160 return ht->nelems < (new_size * 3 / 10);
161}
162EXPORT_SYMBOL_GPL(rht_shrink_below_30);
163
164static void hashtable_chain_unzip(const struct rhashtable *ht,
165 const struct bucket_table *new_tbl,
166 struct bucket_table *old_tbl, size_t n)
167{
168 struct rhash_head *he, *p, *next;
169 unsigned int h;
170
171 /* Old bucket empty, no work needed. */
172 p = rht_dereference(old_tbl->buckets[n], ht);
173 if (!p)
174 return;
175
176 /* Advance the old bucket pointer one or more times until it
177 * reaches a node that doesn't hash to the same bucket as the
178 * previous node p. Call the previous node p;
179 */
180 h = head_hashfn(ht, p, new_tbl->size);
181 rht_for_each(he, p->next, ht) {
182 if (head_hashfn(ht, he, new_tbl->size) != h)
183 break;
184 p = he;
185 }
186 RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
187
188 /* Find the subsequent node which does hash to the same
189 * bucket as node P, or NULL if no such node exists.
190 */
191 next = NULL;
192 if (he) {
193 rht_for_each(he, he->next, ht) {
194 if (head_hashfn(ht, he, new_tbl->size) == h) {
195 next = he;
196 break;
197 }
198 }
199 }
200
201 /* Set p's next pointer to that subsequent node pointer,
202 * bypassing the nodes which do not hash to p's bucket
203 */
204 RCU_INIT_POINTER(p->next, next);
205}
206
207/**
208 * rhashtable_expand - Expand hash table while allowing concurrent lookups
209 * @ht: the hash table to expand
210 * @flags: allocation flags
211 *
212 * A secondary bucket array is allocated and the hash entries are migrated
213 * while keeping them on both lists until the end of the RCU grace period.
214 *
215 * This function may only be called in a context where it is safe to call
216 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
217 *
218 * The caller must ensure that no concurrent table mutations take place.
219 * It is however valid to have concurrent lookups if they are RCU protected.
220 */
221int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
222{
223 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
224 struct rhash_head *he;
225 unsigned int i, h;
226 bool complete;
227
228 ASSERT_RHT_MUTEX(ht);
229
230 if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
231 return 0;
232
233 new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
234 if (new_tbl == NULL)
235 return -ENOMEM;
236
237 ht->shift++;
238
239 /* For each new bucket, search the corresponding old bucket
240 * for the first entry that hashes to the new bucket, and
241 * link the new bucket to that entry. Since all the entries
242 * which will end up in the new bucket appear in the same
243 * old bucket, this constructs an entirely valid new hash
244 * table, but with multiple buckets "zipped" together into a
245 * single imprecise chain.
246 */
247 for (i = 0; i < new_tbl->size; i++) {
248 h = i & (old_tbl->size - 1);
249 rht_for_each(he, old_tbl->buckets[h], ht) {
250 if (head_hashfn(ht, he, new_tbl->size) == i) {
251 RCU_INIT_POINTER(new_tbl->buckets[i], he);
252 break;
253 }
254 }
255 }
256
257 /* Publish the new table pointer. Lookups may now traverse
258 * the new table, but they will not benefit from any
259 * additional efficiency until later steps unzip the buckets.
260 */
261 rcu_assign_pointer(ht->tbl, new_tbl);
262
263 /* Unzip interleaved hash chains */
264 do {
265 /* Wait for readers. All new readers will see the new
266 * table, and thus no references to the old table will
267 * remain.
268 */
269 synchronize_rcu();
270
271 /* For each bucket in the old table (each of which
272 * contains items from multiple buckets of the new
273 * table): ...
274 */
275 complete = true;
276 for (i = 0; i < old_tbl->size; i++) {
277 hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
278 if (old_tbl->buckets[i] != NULL)
279 complete = false;
280 }
281 } while (!complete);
282
283 bucket_table_free(old_tbl);
284 return 0;
285}
286EXPORT_SYMBOL_GPL(rhashtable_expand);
287
288/**
289 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
290 * @ht: the hash table to shrink
291 * @flags: allocation flags
292 *
293 * This function may only be called in a context where it is safe to call
294 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
295 *
296 * The caller must ensure that no concurrent table mutations take place.
297 * It is however valid to have concurrent lookups if they are RCU protected.
298 */
299int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
300{
301 struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
302 struct rhash_head __rcu **pprev;
303 unsigned int i;
304
305 ASSERT_RHT_MUTEX(ht);
306
307 if (tbl->size <= HASH_MIN_SIZE)
308 return 0;
309
310 ntbl = bucket_table_alloc(tbl->size / 2, flags);
311 if (ntbl == NULL)
312 return -ENOMEM;
313
314 ht->shift--;
315
316 /* Link each bucket in the new table to the first bucket
317 * in the old table that contains entries which will hash
318 * to the new bucket.
319 */
320 for (i = 0; i < ntbl->size; i++) {
321 ntbl->buckets[i] = tbl->buckets[i];
322
323 /* Link each bucket in the new table to the first bucket
324 * in the old table that contains entries which will hash
325 * to the new bucket.
326 */
327 for (pprev = &ntbl->buckets[i]; *pprev != NULL;
328 pprev = &rht_dereference(*pprev, ht)->next)
329 ;
330 RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
331 }
332
333 /* Publish the new, valid hash table */
334 rcu_assign_pointer(ht->tbl, ntbl);
335
336 /* Wait for readers. No new readers will have references to the
337 * old hash table.
338 */
339 synchronize_rcu();
340
341 bucket_table_free(tbl);
342
343 return 0;
344}
345EXPORT_SYMBOL_GPL(rhashtable_shrink);
346
347/**
 348 * rhashtable_insert - insert object into hash table
349 * @ht: hash table
350 * @obj: pointer to hash head inside object
351 * @flags: allocation flags (table expansion)
352 *
 353 * Will automatically grow the table via rhashtable_expand() if the
354 * grow_decision function specified at rhashtable_init() returns true.
355 *
356 * The caller must ensure that no concurrent table mutations occur. It is
357 * however valid to have concurrent lookups if they are RCU protected.
358 */
359void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
360 gfp_t flags)
361{
362 struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
363 u32 hash;
364
365 ASSERT_RHT_MUTEX(ht);
366
367 hash = head_hashfn(ht, obj, tbl->size);
368 RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
369 rcu_assign_pointer(tbl->buckets[hash], obj);
370 ht->nelems++;
371
372 if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
373 rhashtable_expand(ht, flags);
374}
375EXPORT_SYMBOL_GPL(rhashtable_insert);
376
377/**
378 * rhashtable_remove_pprev - remove object from hash table given previous element
379 * @ht: hash table
380 * @obj: pointer to hash head inside object
381 * @pprev: pointer to previous element
382 * @flags: allocation flags (table expansion)
383 *
 384 * Identical to rhashtable_remove() but caller is already aware of the element
385 * in front of the element to be deleted. This is in particular useful for
386 * deletion when combined with walking or lookup.
387 */
388void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
389 struct rhash_head **pprev, gfp_t flags)
390{
391 struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
392
393 ASSERT_RHT_MUTEX(ht);
394
395 RCU_INIT_POINTER(*pprev, obj->next);
396 ht->nelems--;
397
398 if (ht->p.shrink_decision &&
399 ht->p.shrink_decision(ht, tbl->size))
400 rhashtable_shrink(ht, flags);
401}
402EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
403
404/**
405 * rhashtable_remove - remove object from hash table
406 * @ht: hash table
407 * @obj: pointer to hash head inside object
408 * @flags: allocation flags (table expansion)
409 *
410 * Since the hash chain is single linked, the removal operation needs to
411 * walk the bucket chain upon removal. The removal operation is thus
 412 * considerably slow if the hash table is not correctly sized.
413 *
 414 * Will automatically shrink the table via rhashtable_shrink() if the
415 * shrink_decision function specified at rhashtable_init() returns true.
416 *
417 * The caller must ensure that no concurrent table mutations occur. It is
418 * however valid to have concurrent lookups if they are RCU protected.
419 */
420bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
421 gfp_t flags)
422{
423 struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
424 struct rhash_head __rcu **pprev;
425 struct rhash_head *he;
426 u32 h;
427
428 ASSERT_RHT_MUTEX(ht);
429
430 h = head_hashfn(ht, obj, tbl->size);
431
432 pprev = &tbl->buckets[h];
433 rht_for_each(he, tbl->buckets[h], ht) {
434 if (he != obj) {
435 pprev = &he->next;
436 continue;
437 }
438
439 rhashtable_remove_pprev(ht, he, pprev, flags);
440 return true;
441 }
442
443 return false;
444}
445EXPORT_SYMBOL_GPL(rhashtable_remove);
446
447/**
448 * rhashtable_lookup - lookup key in hash table
449 * @ht: hash table
450 * @key: pointer to key
451 *
452 * Computes the hash value for the key and traverses the bucket chain looking
 453 * for an entry with an identical key. The first matching entry is returned.
454 *
455 * This lookup function may only be used for fixed key hash table (key_len
 456 * parameter set). It will BUG() if used inappropriately.
457 *
458 * Lookups may occur in parallel with hash mutations as long as the lookup is
459 * guarded by rcu_read_lock(). The caller must take care of this.
460 */
461void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
462{
463 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
464 struct rhash_head *he;
465 u32 h;
466
467 BUG_ON(!ht->p.key_len);
468
469 h = __hashfn(ht, key, ht->p.key_len, tbl->size);
470 rht_for_each_rcu(he, tbl->buckets[h], ht) {
471 if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
472 ht->p.key_len))
473 continue;
474 return (void *) he - ht->p.head_offset;
475 }
476
477 return NULL;
478}
479EXPORT_SYMBOL_GPL(rhashtable_lookup);
480
481/**
482 * rhashtable_lookup_compare - search hash table with compare function
483 * @ht: hash table
484 * @hash: hash value of desired entry
485 * @compare: compare function, must return true on match
486 * @arg: argument passed on to compare function
487 *
488 * Traverses the bucket chain behind the provided hash value and calls the
489 * specified compare function for each entry.
490 *
491 * Lookups may occur in parallel with hash mutations as long as the lookup is
492 * guarded by rcu_read_lock(). The caller must take care of this.
493 *
494 * Returns the first entry on which the compare function returned true.
495 */
496void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
497 bool (*compare)(void *, void *), void *arg)
498{
499 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
500 struct rhash_head *he;
501
502 if (unlikely(hash >= tbl->size))
503 return NULL;
504
505 rht_for_each_rcu(he, tbl->buckets[hash], ht) {
506 if (!compare(rht_obj(ht, he), arg))
507 continue;
508 return (void *) he - ht->p.head_offset;
509 }
510
511 return NULL;
512}
513EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
514
515static size_t rounded_hashtable_size(unsigned int nelem)
516{
517 return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
518}
519
520/**
521 * rhashtable_init - initialize a new hash table
522 * @ht: hash table to be initialized
523 * @params: configuration parameters
524 *
525 * Initializes a new hash table based on the provided configuration
526 * parameters. A table can be configured either with a variable or
527 * fixed length key:
528 *
529 * Configuration Example 1: Fixed length keys
530 * struct test_obj {
531 * int key;
532 * void * my_member;
533 * struct rhash_head node;
534 * };
535 *
536 * struct rhashtable_params params = {
537 * .head_offset = offsetof(struct test_obj, node),
538 * .key_offset = offsetof(struct test_obj, key),
539 * .key_len = sizeof(int),
540 * .hashfn = arch_fast_hash,
541 * .mutex_is_held = &my_mutex_is_held,
542 * };
543 *
544 * Configuration Example 2: Variable length keys
545 * struct test_obj {
546 * [...]
547 * struct rhash_head node;
548 * };
549 *
550 * u32 my_hash_fn(const void *data, u32 seed)
551 * {
552 * struct test_obj *obj = data;
553 *
554 * return [... hash ...];
555 * }
556 *
557 * struct rhashtable_params params = {
558 * .head_offset = offsetof(struct test_obj, node),
559 * .hashfn = arch_fast_hash,
560 * .obj_hashfn = my_hash_fn,
561 * .mutex_is_held = &my_mutex_is_held,
562 * };
563 */
564int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
565{
566 struct bucket_table *tbl;
567 size_t size;
568
569 size = HASH_DEFAULT_SIZE;
570
571 if ((params->key_len && !params->hashfn) ||
572 (!params->key_len && !params->obj_hashfn))
573 return -EINVAL;
574
575 if (params->nelem_hint)
576 size = rounded_hashtable_size(params->nelem_hint);
577
578 tbl = bucket_table_alloc(size, GFP_KERNEL);
579 if (tbl == NULL)
580 return -ENOMEM;
581
582 memset(ht, 0, sizeof(*ht));
583 ht->shift = ilog2(tbl->size);
584 memcpy(&ht->p, params, sizeof(*params));
585 RCU_INIT_POINTER(ht->tbl, tbl);
586
587 if (!ht->p.hash_rnd)
588 get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
589
590 return 0;
591}
592EXPORT_SYMBOL_GPL(rhashtable_init);
593
594/**
595 * rhashtable_destroy - destroy hash table
596 * @ht: the hash table to destroy
597 *
598 * Frees the bucket array.
599 */
600void rhashtable_destroy(const struct rhashtable *ht)
601{
602 const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
603
604 bucket_table_free(tbl);
605}
606EXPORT_SYMBOL_GPL(rhashtable_destroy);
607
608/**************************************************************************
609 * Self Test
610 **************************************************************************/
611
612#ifdef CONFIG_TEST_RHASHTABLE
613
614#define TEST_HT_SIZE 8
615#define TEST_ENTRIES 2048
616#define TEST_PTR ((void *) 0xdeadbeef)
617#define TEST_NEXPANDS 4
618
619static int test_mutex_is_held(void)
620{
621 return 1;
622}
623
624struct test_obj {
625 void *ptr;
626 int value;
627 struct rhash_head node;
628};
629
630static int __init test_rht_lookup(struct rhashtable *ht)
631{
632 unsigned int i;
633
634 for (i = 0; i < TEST_ENTRIES * 2; i++) {
635 struct test_obj *obj;
636 bool expected = !(i % 2);
637 u32 key = i;
638
639 obj = rhashtable_lookup(ht, &key);
640
641 if (expected && !obj) {
642 pr_warn("Test failed: Could not find key %u\n", key);
643 return -ENOENT;
644 } else if (!expected && obj) {
645 pr_warn("Test failed: Unexpected entry found for key %u\n",
646 key);
647 return -EEXIST;
648 } else if (expected && obj) {
649 if (obj->ptr != TEST_PTR || obj->value != i) {
650 pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
651 obj->ptr, TEST_PTR, obj->value, i);
652 return -EINVAL;
653 }
654 }
655 }
656
657 return 0;
658}
659
660static void test_bucket_stats(struct rhashtable *ht,
661 struct bucket_table *tbl,
662 bool quiet)
663{
664 unsigned int cnt, i, total = 0;
665 struct test_obj *obj;
666
667 for (i = 0; i < tbl->size; i++) {
668 cnt = 0;
669
670 if (!quiet)
671 pr_info(" [%#4x/%zu]", i, tbl->size);
672
673 rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
674 cnt++;
675 total++;
676 if (!quiet)
677 pr_cont(" [%p],", obj);
678 }
679
680 if (!quiet)
681 pr_cont("\n [%#x] first element: %p, chain length: %u\n",
682 i, tbl->buckets[i], cnt);
683 }
684
685 pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
686 total, ht->nelems, TEST_ENTRIES);
687}
688
689static int __init test_rhashtable(struct rhashtable *ht)
690{
691 struct bucket_table *tbl;
692 struct test_obj *obj, *next;
693 int err;
694 unsigned int i;
695
696 /*
697 * Insertion Test:
698 * Insert TEST_ENTRIES into table with all keys even numbers
699 */
700 pr_info(" Adding %d keys\n", TEST_ENTRIES);
701 for (i = 0; i < TEST_ENTRIES; i++) {
702 struct test_obj *obj;
703
704 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
705 if (!obj) {
706 err = -ENOMEM;
707 goto error;
708 }
709
710 obj->ptr = TEST_PTR;
711 obj->value = i * 2;
712
713 rhashtable_insert(ht, &obj->node, GFP_KERNEL);
714 }
715
716 rcu_read_lock();
717 tbl = rht_dereference_rcu(ht->tbl, ht);
718 test_bucket_stats(ht, tbl, true);
719 test_rht_lookup(ht);
720 rcu_read_unlock();
721
722 for (i = 0; i < TEST_NEXPANDS; i++) {
723 pr_info(" Table expansion iteration %u...\n", i);
724 rhashtable_expand(ht, GFP_KERNEL);
725
726 rcu_read_lock();
727 pr_info(" Verifying lookups...\n");
728 test_rht_lookup(ht);
729 rcu_read_unlock();
730 }
731
732 for (i = 0; i < TEST_NEXPANDS; i++) {
733 pr_info(" Table shrinkage iteration %u...\n", i);
734 rhashtable_shrink(ht, GFP_KERNEL);
735
736 rcu_read_lock();
737 pr_info(" Verifying lookups...\n");
738 test_rht_lookup(ht);
739 rcu_read_unlock();
740 }
741
742 pr_info(" Deleting %d keys\n", TEST_ENTRIES);
743 for (i = 0; i < TEST_ENTRIES; i++) {
744 u32 key = i * 2;
745
746 obj = rhashtable_lookup(ht, &key);
747 BUG_ON(!obj);
748
749 rhashtable_remove(ht, &obj->node, GFP_KERNEL);
750 kfree(obj);
751 }
752
753 return 0;
754
755error:
756 tbl = rht_dereference_rcu(ht->tbl, ht);
757 for (i = 0; i < tbl->size; i++)
758 rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
759 kfree(obj);
760
761 return err;
762}
763
764static int __init test_rht_init(void)
765{
766 struct rhashtable ht;
767 struct rhashtable_params params = {
768 .nelem_hint = TEST_HT_SIZE,
769 .head_offset = offsetof(struct test_obj, node),
770 .key_offset = offsetof(struct test_obj, value),
771 .key_len = sizeof(int),
772 .hashfn = arch_fast_hash,
773 .mutex_is_held = &test_mutex_is_held,
774 .grow_decision = rht_grow_above_75,
775 .shrink_decision = rht_shrink_below_30,
776 };
777 int err;
778
779 pr_info("Running resizable hashtable tests...\n");
780
781 err = rhashtable_init(&ht, &params);
782 if (err < 0) {
783 pr_warn("Test failed: Unable to initialize hashtable: %d\n",
784 err);
785 return err;
786 }
787
788 err = test_rhashtable(&ht);
789
790 rhashtable_destroy(&ht);
791
792 return err;
793}
794
795subsys_initcall(test_rht_init);
796
797#endif /* CONFIG_TEST_RHASHTABLE */
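
For orientation, here is a compact kernel-style sketch of how a fixed-length-key user of the new API might look, following the rhashtable_init() kerneldoc example above. The structure, helper names and locking stub are hypothetical, not taken from an in-tree user.

#include <linux/rhashtable.h>
#include <linux/hash.h>
#include <linux/slab.h>

struct example_obj {
	u32 id;				/* fixed-length key */
	struct rhash_head node;		/* hash table linkage */
};

static int example_mutex_is_held(void)
{
	return 1;			/* stand-in; a real user checks its own lock */
}

static struct rhashtable example_ht;

static struct rhashtable_params example_params = {
	.head_offset	 = offsetof(struct example_obj, node),
	.key_offset	 = offsetof(struct example_obj, id),
	.key_len	 = sizeof(u32),
	.hashfn		 = arch_fast_hash,
	.mutex_is_held	 = &example_mutex_is_held,
	.grow_decision	 = rht_grow_above_75,
	.shrink_decision = rht_shrink_below_30,
};

static int example_init(void)
{
	return rhashtable_init(&example_ht, &example_params);
}

/* Mutations are serialized by the caller; expansion may happen inline. */
static void example_add(struct example_obj *obj)
{
	rhashtable_insert(&example_ht, &obj->node, GFP_KERNEL);
}

/* Lookups only need rcu_read_lock() held by the caller. */
static struct example_obj *example_find(u32 id)
{
	return rhashtable_lookup(&example_ht, &id);
}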
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index c579e0f58818..89e0345733bd 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -66,7 +66,7 @@ struct bpf_test {
 	const char *descr;
 	union {
 		struct sock_filter insns[MAX_INSNS];
-		struct sock_filter_int insns_int[MAX_INSNS];
+		struct bpf_insn insns_int[MAX_INSNS];
 	} u;
 	__u8 aux;
 	__u8 data[MAX_DATA];
@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp)
 	return len + 1;
 }
 
-static struct sk_filter *generate_filter(int which, int *err)
+static struct bpf_prog *generate_filter(int which, int *err)
 {
-	struct sk_filter *fp;
+	struct bpf_prog *fp;
 	struct sock_fprog_kern fprog;
 	unsigned int flen = probe_filter_length(tests[which].u.insns);
 	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		fprog.filter = tests[which].u.insns;
 		fprog.len = flen;
 
-		*err = sk_unattached_filter_create(&fp, &fprog);
+		*err = bpf_prog_create(&fp, &fprog);
 		if (tests[which].aux & FLAG_EXPECTED_FAIL) {
 			if (*err == -EINVAL) {
 				pr_cont("PASS\n");
@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		break;
 
 	case INTERNAL:
-		fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+		fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
 		if (fp == NULL) {
 			pr_cont("UNEXPECTED_FAIL no memory left\n");
 			*err = -ENOMEM;
@@ -1807,9 +1807,9 @@ static struct sk_filter *generate_filter(int which, int *err)
 
 		fp->len = flen;
 		memcpy(fp->insnsi, tests[which].u.insns_int,
-		       fp->len * sizeof(struct sock_filter_int));
+		       fp->len * sizeof(struct bpf_insn));
 
-		sk_filter_select_runtime(fp);
+		bpf_prog_select_runtime(fp);
 		break;
 	}
 
@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err)
 	return fp;
 }
 
-static void release_filter(struct sk_filter *fp, int which)
+static void release_filter(struct bpf_prog *fp, int which)
 {
 	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
 
 	switch (test_type) {
 	case CLASSIC:
-		sk_unattached_filter_destroy(fp);
+		bpf_prog_destroy(fp);
 		break;
 	case INTERNAL:
-		sk_filter_free(fp);
+		bpf_prog_free(fp);
 		break;
 	}
 }
 
-static int __run_one(const struct sk_filter *fp, const void *data,
+static int __run_one(const struct bpf_prog *fp, const void *data,
 		     int runs, u64 *duration)
 {
 	u64 start, finish;
@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
 	start = ktime_to_us(ktime_get());
 
 	for (i = 0; i < runs; i++)
-		ret = SK_RUN_FILTER(fp, data);
+		ret = BPF_PROG_RUN(fp, data);
 
 	finish = ktime_to_us(ktime_get());
 
@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
 	return ret;
 }
 
-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
 {
 	int err_cnt = 0, i, runs = MAX_TESTRUNS;
 
@@ -1884,7 +1884,7 @@ static __init int test_bpf(void)
 	int i, err_cnt = 0, pass_cnt = 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		struct sk_filter *fp;
+		struct bpf_prog *fp;
 		int err;
 
 		pr_info("#%d %s ", i, tests[i].descr);
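
The test_bpf changes are a pure rename of the classic/internal BPF API: sk_filter becomes bpf_prog, sk_unattached_filter_create()/destroy() become bpf_prog_create()/bpf_prog_destroy(), and SK_RUN_FILTER() becomes BPF_PROG_RUN(). A hedged sketch of the CLASSIC path with the new names, mirroring generate_filter()/__run_one() above (the trivial accept-all filter and the helper are hypothetical):

#include <linux/filter.h>
#include <linux/skbuff.h>

/* Trivial classic filter: accept up to 0xffff bytes of every packet. */
static struct sock_filter accept_all[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffff),
};

static int run_accept_all(const struct sk_buff *skb)
{
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(accept_all),
		.filter	= accept_all,
	};
	struct bpf_prog *fp;
	int err;
	u32 ret;

	err = bpf_prog_create(&fp, &fprog);	/* was sk_unattached_filter_create() */
	if (err)
		return err;

	ret = BPF_PROG_RUN(fp, skb);		/* was SK_RUN_FILTER() */
	bpf_prog_destroy(fp);			/* was sk_unattached_filter_destroy() */

	return ret;
}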