Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                |  14
-rw-r--r--  lib/Makefile                     |   2
-rw-r--r--  lib/iov_iter.c                   |   4
-rw-r--r--  lib/lzo/lzo1x_compress.c         |   9
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c  |   4
-rw-r--r--  lib/nlattr.c                     | 184
-rw-r--r--  lib/raid6/Makefile               |   2
-rw-r--r--  lib/rhashtable.c                 | 172
-rw-r--r--  lib/sbitmap.c                    |  11
-rw-r--r--  lib/string.c                     |  20
-rw-r--r--  lib/syscall.c                    |  57
-rw-r--r--  lib/test_rhashtable.c            |   2
12 files changed, 276 insertions, 205 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0d9e81779e37..7117ac61174e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -219,6 +219,14 @@ config DEBUG_INFO_DWARF4
 	  But it significantly improves the success of resolving
 	  variables in gdb on optimized code.
 
+config DEBUG_INFO_BTF
+	bool "Generate BTF typeinfo"
+	depends on DEBUG_INFO
+	help
+	  Generate deduplicated BTF type information from DWARF debug info.
+	  Turning this on expects presence of pahole tool, which will convert
+	  DWARF type info into equivalent deduplicated BTF type info.
+
 config GDB_SCRIPTS
 	bool "Provide GDB scripts for kernel debugging"
 	depends on DEBUG_INFO
@@ -753,9 +761,9 @@ endmenu # "Memory Debugging"
 config ARCH_HAS_KCOV
 	bool
 	help
-	  KCOV does not have any arch-specific code, but currently it is enabled
-	  only for x86_64. KCOV requires testing on other archs, and most likely
-	  disabling of instrumentation for some early boot code.
+	  An architecture should select this when it can successfully
+	  build and run with CONFIG_KCOV. This typically requires
+	  disabling instrumentation for some early boot code.
 
 config CC_HAS_SANCOV_TRACE_PC
 	def_bool $(cc-option,-fsanitize-coverage=trace-pc)
diff --git a/lib/Makefile b/lib/Makefile
index 4e066120a0d6..3b08673e8881 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -213,7 +213,7 @@ KCOV_INSTRUMENT_stackdepot.o := n
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
 	       fdt_empty_tree.o
 $(foreach file, $(libfdt_files), \
-	$(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
+	$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
 lib-$(CONFIG_LIBFDT) += $(libfdt_files)
 
 obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index ea36dc355da1..b396d328a764 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1528,6 +1528,7 @@ EXPORT_SYMBOL(csum_and_copy_to_iter);
 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
 		struct iov_iter *i)
 {
+#ifdef CONFIG_CRYPTO
 	struct ahash_request *hash = hashp;
 	struct scatterlist sg;
 	size_t copied;
@@ -1537,6 +1538,9 @@ size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
 	ahash_request_set_crypt(hash, &sg, NULL, copied);
 	crypto_ahash_update(hash);
 	return copied;
+#else
+	return 0;
+#endif
 }
 EXPORT_SYMBOL(hash_and_copy_to_iter);
 
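With CONFIG_CRYPTO disabled, the hunk above turns hash_and_copy_to_iter() into a stub that copies nothing and returns 0, so callers must treat a short return as no progress. A minimal caller sketch, assuming a hypothetical wrapper name recv_and_hash() that is not part of this patch:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>

/* Hedged sketch: treat a short return as "nothing consumed", which is
 * exactly what the CONFIG_CRYPTO=n stub above produces.
 */
static ssize_t recv_and_hash(const void *buf, size_t len, void *hash_req,
			     struct iov_iter *to)
{
	size_t copied = hash_and_copy_to_iter(buf, len, hash_req, to);

	if (copied != len)	/* short copy, or CONFIG_CRYPTO disabled */
		return -EFAULT;
	return copied;
}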
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 4525fb094844..a8ede77afe0d 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -291,13 +291,14 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 {
 	const unsigned char *ip = in;
 	unsigned char *op = out;
+	unsigned char *data_start;
 	size_t l = in_len;
 	size_t t = 0;
 	signed char state_offset = -2;
 	unsigned int m4_max_offset;
 
-	// LZO v0 will never write 17 as first byte,
-	// so this is used to version the bitstream
+	// LZO v0 will never write 17 as first byte (except for zero-length
+	// input), so this is used to version the bitstream
 	if (bitstream_version > 0) {
 		*op++ = 17;
 		*op++ = bitstream_version;
@@ -306,6 +307,8 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 		m4_max_offset = M4_MAX_OFFSET_V0;
 	}
 
+	data_start = op;
+
 	while (l > 20) {
 		size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
 		uintptr_t ll_end = (uintptr_t) ip + ll;
@@ -324,7 +327,7 @@ int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 	if (t > 0) {
 		const unsigned char *ii = in + in_len - t;
 
-		if (op == out && t <= 238) {
+		if (op == data_start && t <= 238) {
 			*op++ = (17 + t);
 		} else if (t <= 3) {
 			op[state_offset] |= t;
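The switch from op == out to op == data_start matters because a versioned stream now begins with a two-byte header (17 followed by the version), so op can never equal out once that header has been emitted. A hedged, stand-alone sketch of the header layout; emit_header() is illustrative and not part of the patch:

#include <linux/types.h>

/* Hedged sketch of the output layout implied by the hunks above. */
static size_t emit_header(unsigned char *out, int bitstream_version,
			  unsigned char **data_start)
{
	unsigned char *op = out;

	if (bitstream_version > 0) {
		*op++ = 17;	/* never a valid v0 first byte */
		*op++ = (unsigned char)bitstream_version;
	}
	*data_start = op;	/* first-literal-run check compares against this */
	return (size_t)(op - out);
}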
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 6d2600ea3b55..9e07e9ef1aad 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -54,11 +54,9 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
 	if (unlikely(in_len < 3))
 		goto input_overrun;
 
-	if (likely(*ip == 17)) {
+	if (likely(in_len >= 5) && likely(*ip == 17)) {
 		bitstream_version = ip[1];
 		ip += 2;
-		if (unlikely(in_len < 5))
-			goto input_overrun;
 	} else {
 		bitstream_version = 0;
 	}
diff --git a/lib/nlattr.c b/lib/nlattr.c
index d26de6156b97..29f6336e2422 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -69,7 +69,8 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
 
 static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
 			      const struct nla_policy *policy,
-			      struct netlink_ext_ack *extack)
+			      struct netlink_ext_ack *extack,
+			      unsigned int validate)
 {
 	const struct nlattr *entry;
 	int rem;
@@ -86,8 +87,8 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
 			return -ERANGE;
 		}
 
-		ret = nla_validate(nla_data(entry), nla_len(entry),
-				   maxtype, policy, extack);
+		ret = __nla_validate(nla_data(entry), nla_len(entry),
+				     maxtype, policy, validate, extack);
 		if (ret < 0)
 			return ret;
 	}
@@ -154,13 +155,17 @@ static int nla_validate_int_range(const struct nla_policy *pt,
 }
 
 static int validate_nla(const struct nlattr *nla, int maxtype,
-			const struct nla_policy *policy,
+			const struct nla_policy *policy, unsigned int validate,
 			struct netlink_ext_ack *extack)
 {
+	u16 strict_start_type = policy[0].strict_start_type;
 	const struct nla_policy *pt;
 	int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
 	int err = -ERANGE;
 
+	if (strict_start_type && type >= strict_start_type)
+		validate |= NL_VALIDATE_STRICT;
+
 	if (type <= 0 || type > maxtype)
 		return 0;
 
@@ -172,6 +177,11 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 	    (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) {
 		pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
 				    current->comm, type);
+		if (validate & NL_VALIDATE_STRICT_ATTRS) {
+			NL_SET_ERR_MSG_ATTR(extack, nla,
+					    "invalid attribute length");
+			return -EINVAL;
+		}
 	}
 
 	switch (pt->type) {
@@ -244,8 +254,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 		if (attrlen < NLA_HDRLEN)
 			goto out_err;
 		if (pt->validation_data) {
-			err = nla_validate(nla_data(nla), nla_len(nla), pt->len,
-					   pt->validation_data, extack);
+			err = __nla_validate(nla_data(nla), nla_len(nla), pt->len,
+					     pt->validation_data, validate,
+					     extack);
 			if (err < 0) {
 				/*
 				 * return directly to preserve the inner
@@ -268,7 +279,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 
 			err = nla_validate_array(nla_data(nla), nla_len(nla),
 						 pt->len, pt->validation_data,
-						 extack);
+						 extack, validate);
 			if (err < 0) {
 				/*
 				 * return directly to preserve the inner
@@ -278,10 +289,23 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
 			}
 		}
 		break;
+
+	case NLA_UNSPEC:
+		if (validate & NL_VALIDATE_UNSPEC) {
+			NL_SET_ERR_MSG_ATTR(extack, nla,
+					    "Unsupported attribute");
+			return -EINVAL;
+		}
+		/* fall through */
+	case NLA_MIN_LEN:
+		if (attrlen < pt->len)
+			goto out_err;
+		break;
+
 	default:
 		if (pt->len)
 			minlen = pt->len;
-		else if (pt->type != NLA_UNSPEC)
+		else
 			minlen = nla_attr_minlen[pt->type];
 
 		if (attrlen < minlen)
@@ -315,37 +339,75 @@ out_err:
 	return err;
 }
 
+static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
+				const struct nla_policy *policy,
+				unsigned int validate,
+				struct netlink_ext_ack *extack,
+				struct nlattr **tb)
+{
+	const struct nlattr *nla;
+	int rem;
+
+	if (tb)
+		memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
+
+	nla_for_each_attr(nla, head, len, rem) {
+		u16 type = nla_type(nla);
+
+		if (type == 0 || type > maxtype) {
+			if (validate & NL_VALIDATE_MAXTYPE) {
+				NL_SET_ERR_MSG(extack, "Unknown attribute type");
+				return -EINVAL;
+			}
+			continue;
+		}
+		if (policy) {
+			int err = validate_nla(nla, maxtype, policy,
+					       validate, extack);
+
+			if (err < 0)
+				return err;
+		}
+
+		if (tb)
+			tb[type] = (struct nlattr *)nla;
+	}
+
+	if (unlikely(rem > 0)) {
+		pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+				    rem, current->comm);
+		NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
+		if (validate & NL_VALIDATE_TRAILING)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
- * nla_validate - Validate a stream of attributes
+ * __nla_validate - Validate a stream of attributes
  * @head: head of attribute stream
  * @len: length of attribute stream
  * @maxtype: maximum attribute type to be expected
  * @policy: validation policy
+ * @validate: validation strictness
  * @extack: extended ACK report struct
  *
  * Validates all attributes in the specified attribute stream against the
- * specified policy. Attributes with a type exceeding maxtype will be
- * ignored. See documenation of struct nla_policy for more details.
+ * specified policy. Validation depends on the validate flags passed, see
+ * &enum netlink_validation for more details on that.
+ * See documenation of struct nla_policy for more details.
  *
  * Returns 0 on success or a negative error code.
  */
-int nla_validate(const struct nlattr *head, int len, int maxtype,
-		 const struct nla_policy *policy,
+int __nla_validate(const struct nlattr *head, int len, int maxtype,
+		   const struct nla_policy *policy, unsigned int validate,
 		   struct netlink_ext_ack *extack)
 {
-	const struct nlattr *nla;
-	int rem;
-
-	nla_for_each_attr(nla, head, len, rem) {
-		int err = validate_nla(nla, maxtype, policy, extack);
-
-		if (err < 0)
-			return err;
-	}
-
-	return 0;
+	return __nla_validate_parse(head, len, maxtype, policy, validate,
+				    extack, NULL);
 }
-EXPORT_SYMBOL(nla_validate);
+EXPORT_SYMBOL(__nla_validate);
 
 /**
  * nla_policy_len - Determin the max. length of a policy
@@ -377,76 +439,30 @@ nla_policy_len(const struct nla_policy *p, int n)
 EXPORT_SYMBOL(nla_policy_len);
 
 /**
- * nla_parse - Parse a stream of attributes into a tb buffer
+ * __nla_parse - Parse a stream of attributes into a tb buffer
  * @tb: destination array with maxtype+1 elements
  * @maxtype: maximum attribute type to be expected
  * @head: head of attribute stream
  * @len: length of attribute stream
  * @policy: validation policy
+ * @validate: validation strictness
+ * @extack: extended ACK pointer
  *
  * Parses a stream of attributes and stores a pointer to each attribute in
- * the tb array accessible via the attribute type. Attributes with a type
- * exceeding maxtype will be silently ignored for backwards compatibility
- * reasons. policy may be set to NULL if no validation is required.
+ * the tb array accessible via the attribute type.
+ * Validation is controlled by the @validate parameter.
  *
  * Returns 0 on success or a negative error code.
  */
-static int __nla_parse(struct nlattr **tb, int maxtype,
+int __nla_parse(struct nlattr **tb, int maxtype,
 		const struct nlattr *head, int len,
-		bool strict, const struct nla_policy *policy,
+		const struct nla_policy *policy, unsigned int validate,
 		struct netlink_ext_ack *extack)
-{
-	const struct nlattr *nla;
-	int rem;
-
-	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
-
-	nla_for_each_attr(nla, head, len, rem) {
-		u16 type = nla_type(nla);
-
-		if (type == 0 || type > maxtype) {
-			if (strict) {
-				NL_SET_ERR_MSG(extack, "Unknown attribute type");
-				return -EINVAL;
-			}
-			continue;
-		}
-		if (policy) {
-			int err = validate_nla(nla, maxtype, policy, extack);
-
-			if (err < 0)
-				return err;
-		}
-
-		tb[type] = (struct nlattr *)nla;
-	}
-
-	if (unlikely(rem > 0)) {
-		pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
-				    rem, current->comm);
-		NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
-		if (strict)
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
-	      int len, const struct nla_policy *policy,
-	      struct netlink_ext_ack *extack)
-{
-	return __nla_parse(tb, maxtype, head, len, false, policy, extack);
-}
-EXPORT_SYMBOL(nla_parse);
-
-int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
-		     int len, const struct nla_policy *policy,
-		     struct netlink_ext_ack *extack)
 {
-	return __nla_parse(tb, maxtype, head, len, true, policy, extack);
+	return __nla_validate_parse(head, len, maxtype, policy, validate,
+				    extack, tb);
 }
-EXPORT_SYMBOL(nla_parse_strict);
+EXPORT_SYMBOL(__nla_parse);
 
 /**
  * nla_find - Find a specific attribute in a stream of attributes
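The net effect of the nlattr.c changes is that validation and parsing share __nla_validate_parse(), with strictness selected per call through the NL_VALIDATE_* flags seen in the hunks above. A hedged caller sketch; the policy, the attribute names and the my_parse() helper are illustrative and not from this patch:

#include <net/netlink.h>

/* Hedged sketch: strict parse of a hypothetical two-attribute policy. */
enum { MY_ATTR_UNSPEC, MY_ATTR_PORT, MY_ATTR_NAME, __MY_ATTR_MAX };
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_PORT] = { .type = NLA_U16 },
	[MY_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
};

static int my_parse(const struct nlattr *head, int len,
		    struct netlink_ext_ack *extack, struct nlattr **tb)
{
	/* Reject unknown types, trailing bytes and bad attribute lengths. */
	return __nla_parse(tb, MY_ATTR_MAX, head, len, my_policy,
			   NL_VALIDATE_MAXTYPE | NL_VALIDATE_TRAILING |
			   NL_VALIDATE_STRICT_ATTRS, extack);
}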
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 4e90d443d1b0..e723eacf7868 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -39,7 +39,7 @@ endif
 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
 NEON_FLAGS := -ffreestanding
 ifeq ($(ARCH),arm)
-NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
+NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
 endif
 CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
 ifeq ($(ARCH),arm64)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index f65e43fb1ff8..6529fe1b45c1 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -31,11 +31,10 @@
 
 #define HASH_DEFAULT_SIZE	64UL
 #define HASH_MIN_SIZE		4U
-#define BUCKET_LOCKS_PER_CPU	32UL
 
 union nested_table {
 	union nested_table __rcu *table;
-	struct rhash_head __rcu *bucket;
+	struct rhash_lock_head __rcu *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -56,9 +55,11 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 
 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
 {
-	spinlock_t *lock = rht_bucket_lock(tbl, hash);
-
-	return (debug_locks) ? lockdep_is_held(lock) : 1;
+	if (!debug_locks)
+		return 1;
+	if (unlikely(tbl->nest))
+		return 1;
+	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
 }
 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #else
@@ -104,7 +105,6 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	free_bucket_spinlocks(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -131,9 +131,11 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
 	}
 
-	rcu_assign_pointer(*prev, ntbl);
-
-	return ntbl;
+	if (cmpxchg(prev, NULL, ntbl) == NULL)
+		return ntbl;
+	/* Raced with another thread. */
+	kfree(ntbl);
+	return rcu_dereference(*prev);
 }
 
 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
@@ -169,11 +171,11 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
-	size_t size, max_locks;
+	size_t size;
 	int i;
+	static struct lock_class_key __key;
 
-	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	tbl = kvzalloc(size, gfp);
+	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
 
 	size = nbuckets;
 
@@ -185,17 +187,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 	if (tbl == NULL)
 		return NULL;
 
-	tbl->size = size;
-
-	max_locks = size >> 1;
-	if (tbl->nest)
-		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
 
-	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
-				   ht->p.locks_mul, gfp) < 0) {
-		bucket_table_free(tbl);
-		return NULL;
-	}
+	tbl->size = size;
 
 	rcu_head_init(&tbl->rcu);
 	INIT_LIST_HEAD(&tbl->walkers);
@@ -221,14 +215,15 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 	return new_tbl;
 }
 
-static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
+static int rhashtable_rehash_one(struct rhashtable *ht,
+				 struct rhash_lock_head __rcu **bkt,
+				 unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
-	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
 	int err = -EAGAIN;
 	struct rhash_head *head, *next, *entry;
-	spinlock_t *new_bucket_lock;
+	struct rhash_head __rcu **pprev = NULL;
 	unsigned int new_hash;
 
 	if (new_tbl->nest)
@@ -236,7 +231,8 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 
 	err = -ENOENT;
 
-	rht_for_each(entry, old_tbl, old_hash) {
+	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
+			  old_tbl, old_hash) {
 		err = 0;
 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
 
@@ -251,18 +247,19 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 
 		new_hash = head_hashfn(ht, new_tbl, entry);
 
-		new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
+		rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
 
-		spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
-		head = rht_dereference_bucket(new_tbl->buckets[new_hash],
-					      new_tbl, new_hash);
+		head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
 
 		RCU_INIT_POINTER(entry->next, head);
 
-		rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
-		spin_unlock(new_bucket_lock);
+		rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
 
-		rcu_assign_pointer(*pprev, next);
+		if (pprev)
+			rcu_assign_pointer(*pprev, next);
+		else
+			/* Need to preserved the bit lock. */
+			rht_assign_locked(bkt, next);
 
 out:
 	return err;
@@ -272,19 +269,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	spinlock_t *old_bucket_lock;
+	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
 	int err;
 
-	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
+	if (!bkt)
+		return 0;
+	rht_lock(old_tbl, bkt);
 
-	spin_lock_bh(old_bucket_lock);
-	while (!(err = rhashtable_rehash_one(ht, old_hash)))
+	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
 		;
 
 	if (err == -ENOENT)
 		err = 0;
-
-	spin_unlock_bh(old_bucket_lock);
+	rht_unlock(old_tbl, bkt);
 
 	return err;
 }
@@ -419,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
 	else if (tbl->nest)
 		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
 
-	if (!err)
-		err = rhashtable_rehash_table(ht);
+	if (!err || err == -EEXIST) {
+		int nerr;
+
+		nerr = rhashtable_rehash_table(ht);
+		err = err ?: nerr;
+	}
 
 	mutex_unlock(&ht->mutex);
 
@@ -477,6 +478,7 @@ fail:
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
+				   struct rhash_lock_head __rcu **bkt,
 				   struct bucket_table *tbl, unsigned int hash,
 				   const void *key, struct rhash_head *obj)
 {
@@ -484,13 +486,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		.ht = ht,
 		.key = key,
 	};
-	struct rhash_head __rcu **pprev;
+	struct rhash_head __rcu **pprev = NULL;
 	struct rhash_head *head;
 	int elasticity;
 
 	elasticity = RHT_ELASTICITY;
-	pprev = rht_bucket_var(tbl, hash);
-	rht_for_each_from(head, *pprev, tbl, hash) {
+	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
 		struct rhlist_head *list;
 		struct rhlist_head *plist;
 
@@ -512,7 +513,11 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		RCU_INIT_POINTER(list->next, plist);
 		head = rht_dereference_bucket(head->next, tbl, hash);
 		RCU_INIT_POINTER(list->rhead.next, head);
-		rcu_assign_pointer(*pprev, obj);
+		if (pprev)
+			rcu_assign_pointer(*pprev, obj);
+		else
+			/* Need to preserve the bit lock */
+			rht_assign_locked(bkt, obj);
 
 		return NULL;
 	}
@@ -524,12 +529,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 }
 
 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
+						  struct rhash_lock_head __rcu **bkt,
 						  struct bucket_table *tbl,
 						  unsigned int hash,
 						  struct rhash_head *obj,
 						  void *data)
 {
-	struct rhash_head __rcu **pprev;
 	struct bucket_table *new_tbl;
 	struct rhash_head *head;
 
@@ -552,11 +557,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	if (unlikely(rht_grow_above_100(ht, tbl)))
 		return ERR_PTR(-EAGAIN);
 
-	pprev = rht_bucket_insert(ht, tbl, hash);
-	if (!pprev)
-		return ERR_PTR(-ENOMEM);
-
-	head = rht_dereference_bucket(*pprev, tbl, hash);
+	head = rht_ptr(bkt, tbl, hash);
 
 	RCU_INIT_POINTER(obj->next, head);
 	if (ht->rhlist) {
@@ -566,7 +567,10 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 		RCU_INIT_POINTER(list->next, NULL);
 	}
 
-	rcu_assign_pointer(*pprev, obj);
+	/* bkt is always the head of the list, so it holds
+	 * the lock, which we need to preserve
+	 */
+	rht_assign_locked(bkt, obj);
 
 	atomic_inc(&ht->nelems);
 	if (rht_grow_above_75(ht, tbl))
@@ -580,6 +584,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
+	struct rhash_lock_head __rcu **bkt;
 	unsigned int hash;
 	void *data;
 
@@ -588,14 +593,25 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 	do {
 		tbl = new_tbl;
 		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
-		spin_lock_bh(rht_bucket_lock(tbl, hash));
-
-		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
-		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
-		if (PTR_ERR(new_tbl) != -EEXIST)
-			data = ERR_CAST(new_tbl);
-
-		spin_unlock_bh(rht_bucket_lock(tbl, hash));
+		if (rcu_access_pointer(tbl->future_tbl))
+			/* Failure is OK */
+			bkt = rht_bucket_var(tbl, hash);
+		else
+			bkt = rht_bucket_insert(ht, tbl, hash);
+		if (bkt == NULL) {
+			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+			data = ERR_PTR(-EAGAIN);
+		} else {
+			rht_lock(tbl, bkt);
+			data = rhashtable_lookup_one(ht, bkt, tbl,
+						     hash, key, obj);
+			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
+							hash, obj, data);
+			if (PTR_ERR(new_tbl) != -EEXIST)
+				data = ERR_CAST(new_tbl);
+
+			rht_unlock(tbl, bkt);
+		}
 	} while (!IS_ERR_OR_NULL(new_tbl));
 
 	if (PTR_ERR(data) == -EAGAIN)
@@ -1022,11 +1038,6 @@ int rhashtable_init(struct rhashtable *ht,
 
 	size = rounded_hashtable_size(&ht->p);
 
-	if (params->locks_mul)
-		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
-	else
-		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
-
 	ht->key_len = ht->p.key_len;
 	if (!params->hashfn) {
 		ht->p.hashfn = jhash;
@@ -1128,7 +1139,7 @@ restart:
 		struct rhash_head *pos, *next;
 
 		cond_resched();
-		for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
+		for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
 		     next = !rht_is_a_nulls(pos) ?
 			   rht_dereference(pos->next, ht) : NULL;
 		     !rht_is_a_nulls(pos);
@@ -1155,11 +1166,10 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
 						   unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
-	static struct rhash_head __rcu *rhnull;
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
 	unsigned int size = tbl->size >> tbl->nest;
 	unsigned int subhash = hash;
@@ -1177,20 +1187,28 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 		subhash >>= shift;
 	}
 
-	if (!ntbl) {
-		if (!rhnull)
-			INIT_RHT_NULLS_HEAD(rhnull);
-		return &rhnull;
-	}
+	if (!ntbl)
+		return NULL;
 
 	return &ntbl[subhash].bucket;
 
 }
+EXPORT_SYMBOL_GPL(__rht_bucket_nested);
+
+struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+						 unsigned int hash)
+{
+	static struct rhash_lock_head __rcu *rhnull;
+
+	if (!rhnull)
+		INIT_RHT_NULLS_HEAD(rhnull);
+	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
+}
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
 							struct bucket_table *tbl,
 							unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
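The rhashtable changes drop the separate bucket-lock array and instead use bit 0 of each bucket pointer as a per-bucket spinlock (visible in the bit_spin_is_locked() test in lockdep_rht_bucket_is_held() above). A hedged, stand-alone sketch of that idea using the generic bit_spinlock API rather than the real rht_lock()/rht_ptr() helpers; the demo_* names are illustrative:

#include <linux/bit_spinlock.h>

/* Hedged sketch: bit 0 of the head pointer doubles as a spinlock, so a
 * table of N buckets needs no separate lock array. Not the kernel API.
 */
struct demo_head { struct demo_head *next; };

static void demo_bucket_lock(struct demo_head **bkt)
{
	bit_spin_lock(0, (unsigned long *)bkt);
}

static struct demo_head *demo_bucket_head(struct demo_head **bkt)
{
	/* Mask off the lock bit before dereferencing the pointer. */
	return (struct demo_head *)((unsigned long)*bkt & ~1UL);
}

static void demo_bucket_unlock(struct demo_head **bkt)
{
	bit_spin_unlock(0, (unsigned long *)bkt);
}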
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5b382c1244ed..155fe38756ec 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 			 unsigned int cpu)
 {
+	/*
+	 * Once the clear bit is set, the bit may be allocated out.
+	 *
+	 * Orders READ/WRITE on the asssociated instance(such as request
+	 * of blk_mq) by this bit for avoiding race with re-allocation,
+	 * and its pair is the memory barrier implied in __sbitmap_get_word.
+	 *
+	 * One invariant is that the clear bit has to be zero when the bit
+	 * is in use.
+	 */
+	smp_mb__before_atomic();
 	sbitmap_deferred_clear_bit(&sbq->sb, nr);
 
 	/*
diff --git a/lib/string.c b/lib/string.c
index 38e4ca08e757..3ab861c1a857 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
 EXPORT_SYMBOL(memcmp);
 #endif
 
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+	return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
 #ifndef __HAVE_ARCH_MEMSCAN
 /**
  * memscan - Find a character in an area of memory.
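As the new kernel-doc says, only the zero/non-zero distinction of bcmp() is meaningful. A short hedged usage sketch; keys_match() is an illustrative helper, not part of the patch:

#include <linux/string.h>
#include <linux/types.h>

/* Hedged sketch: bcmp() is only an equality test; do not rely on the
 * sign of a non-zero result the way memcmp() callers may.
 */
static bool keys_match(const void *a, const void *b, size_t len)
{
	return bcmp(a, b, len) == 0;
}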
diff --git a/lib/syscall.c b/lib/syscall.c
index 1a7077f20eae..fb328e7ccb08 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -5,16 +5,14 @@
 #include <linux/export.h>
 #include <asm/syscall.h>
 
-static int collect_syscall(struct task_struct *target, long *callno,
-			   unsigned long args[6], unsigned int maxargs,
-			   unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
 {
 	struct pt_regs *regs;
 
 	if (!try_get_task_stack(target)) {
 		/* Task has no stack, so the task isn't in a syscall. */
-		*sp = *pc = 0;
-		*callno = -1;
+		memset(info, 0, sizeof(*info));
+		info->data.nr = -1;
 		return 0;
 	}
 
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
 		return -EAGAIN;
 	}
 
-	*sp = user_stack_pointer(regs);
-	*pc = instruction_pointer(regs);
+	info->sp = user_stack_pointer(regs);
+	info->data.instruction_pointer = instruction_pointer(regs);
 
-	*callno = syscall_get_nr(target, regs);
-	if (*callno != -1L && maxargs > 0)
-		syscall_get_arguments(target, regs, 0, maxargs, args);
+	info->data.nr = syscall_get_nr(target, regs);
+	if (info->data.nr != -1L)
+		syscall_get_arguments(target, regs,
+				      (unsigned long *)&info->data.args[0]);
 
 	put_task_stack(target);
 	return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
 /**
  * task_current_syscall - Discover what a blocked task is doing.
  * @target: thread to examine
- * @callno: filled with system call number or -1
- * @args: filled with @maxargs system call arguments
- * @maxargs: number of elements in @args to fill
- * @sp: filled with user stack pointer
- * @pc: filled with user PC
+ * @info: structure with the following fields:
+ *        .sp        - filled with user stack pointer
+ *        .data.nr   - filled with system call number or -1
+ *        .data.args - filled with @maxargs system call arguments
+ *        .data.instruction_pointer - filled with user PC
  *
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
  * call is still in progress. Note we may get this result if @target
  * has finished its system call but not yet returned to user mode, such
  * as when it's stopped for signal handling or syscall exit tracing.
  *
  * If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * returns zero with *@info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
  *
  * Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
  */
-int task_current_syscall(struct task_struct *target, long *callno,
-			 unsigned long args[6], unsigned int maxargs,
-			 unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
 {
 	long state;
 	unsigned long ncsw;
 
-	if (unlikely(maxargs > 6))
-		return -EINVAL;
-
 	if (target == current)
-		return collect_syscall(target, callno, args, maxargs, sp, pc);
+		return collect_syscall(target, info);
 
 	state = target->state;
 	if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
 
 	ncsw = wait_task_inactive(target, state);
 	if (unlikely(!ncsw) ||
-	    unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+	    unlikely(collect_syscall(target, info)) ||
 	    unlikely(wait_task_inactive(target, state) != ncsw))
 		return -EAGAIN;
 
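Callers of task_current_syscall() now receive everything through struct syscall_info rather than five output parameters. A hedged caller sketch, assuming the struct layout (__u64 sp plus a struct seccomp_data) introduced by the companion header change that is not shown here; report_blocked_syscall() is illustrative:

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/syscalls.h>

/* Hedged sketch: report what a blocked task is doing, using only the
 * fields visible in the hunks above (sp, data.nr, data.args,
 * data.instruction_pointer).
 */
static void report_blocked_syscall(struct task_struct *task)
{
	struct syscall_info info;

	if (task_current_syscall(task, &info))
		return;		/* -EAGAIN: task did not stay blocked */

	if (info.data.nr == -1)
		pr_info("%s: not in a syscall\n", task->comm);
	else
		pr_info("%s: syscall %d, first arg 0x%llx, sp 0x%llx\n",
			task->comm, info.data.nr,
			(unsigned long long)info.data.args[0],
			(unsigned long long)info.sp);
}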
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 3bd2e91bfc29..084fe5a6ac57 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -500,7 +500,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
 		struct rhash_head *pos, *next;
 		struct test_obj_rhl *p;
 
-		pos = rht_dereference(tbl->buckets[i], ht);
+		pos = rht_ptr_exclusive(tbl->buckets + i);
 		next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
 
 		if (!rht_is_a_nulls(pos)) {