path: root/lib
author    David S. Miller <davem@davemloft.net>    2018-03-23 11:24:57 -0400
committer David S. Miller <davem@davemloft.net>    2018-03-23 11:31:58 -0400
commit    03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch)
tree      fbaf8738296b2e9dcba81c6daef2d515b6c4948c /lib
parent    6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff)
parent    f36b7534b83357cf52e747905de6d65b4f7c2512 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...

For the mac80211 stuff, these were fortunately just parallel adds. Trivially resolved.

In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the function phy_disable_interrupts() earlier in the file, whilst in 'net-next' the phy_error() call from this function was removed.

In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the 'rt_table_id' member of rtable collided with a bug fix in 'net' that added a new struct member "rt_mtu_locked" which needs to be copied over here.

The mlxsw driver conflict consisted of net-next separating the span code and definitions into separate files, whilst a 'net' bug fix made some changes to that moved code.

The mlx5 infiniband conflict resolution was quite non-trivial, the RDMA tree's merge commit was used as a guide here, and here are their notes:

====================
Due to bug fixes found by the syzkaller bot and taken into the for-rc branch after development for the 4.17 merge window had already started being taken into the for-next branch, there were fairly non-trivial merge issues that would need to be resolved between the for-rc branch and the for-next branch. This merge resolves those conflicts and provides a unified base upon which ongoing development for 4.17 can be based.

Conflicts:
	drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524 (IB/mlx5: Fix cleanup order on unload) added to for-rc and commit b5ca15ad7e61 (IB/mlx5: Add proper representors support) added as part of the devel cycle both needed to modify the init/de-init functions used by mlx5. To support the new representors, the new functions added by the cleanup patch needed to be made non-static, and the init/de-init list added by the representors patch needed to be modified to match the init/de-init list changes made by the cleanup patch.

Updates:
	drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function prototypes added by representors patch to reflect new function names as changed by cleanup patch

	drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init stage list to match new order from cleanup patch
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
-rw-r--r--  lib/btree.c            |  10
-rw-r--r--  lib/bug.c              |   4
-rw-r--r--  lib/ioremap.c          |   6
-rw-r--r--  lib/percpu-refcount.c  |   2
-rw-r--r--  lib/rhashtable.c       |   4
-rw-r--r--  lib/test_bpf.c         |   2
-rw-r--r--  lib/test_kmod.c        |   2
-rw-r--r--  lib/test_rhashtable.c  | 134
8 files changed, 154 insertions, 10 deletions
diff --git a/lib/btree.c b/lib/btree.c
index f93a945274af..590facba2c50 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -3,7 +3,7 @@
  *
  * As should be obvious for Linux kernel code, license is GPLv2
  *
- * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
+ * Copyright (c) 2007-2008 Joern Engel <joern@purestorage.com>
  * Bits and pieces stolen from Peter Zijlstra's code, which is
  * Copyright 2007, Red Hat Inc. Peter Zijlstra
  * GPLv2
@@ -76,6 +76,8 @@ struct btree_geo btree_geo128 = {
 };
 EXPORT_SYMBOL_GPL(btree_geo128);
 
+#define MAX_KEYLEN	(2 * LONG_PER_U64)
+
 static struct kmem_cache *btree_cachep;
 
 void *btree_alloc(gfp_t gfp_mask, void *pool_data)
@@ -313,7 +315,7 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
 {
 	int i, height;
 	unsigned long *node, *oldnode;
-	unsigned long *retry_key = NULL, key[geo->keylen];
+	unsigned long *retry_key = NULL, key[MAX_KEYLEN];
 
 	if (keyzero(geo, __key))
 		return NULL;
@@ -639,8 +641,8 @@ EXPORT_SYMBOL_GPL(btree_remove);
 int btree_merge(struct btree_head *target, struct btree_head *victim,
 		struct btree_geo *geo, gfp_t gfp)
 {
-	unsigned long key[geo->keylen];
-	unsigned long dup[geo->keylen];
+	unsigned long key[MAX_KEYLEN];
+	unsigned long dup[MAX_KEYLEN];
 	void *val;
 	int err;
 
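For context on these btree.c hunks: the old code sized on-stack key arrays with geo->keylen, a variable-length array, and the fix bounds them with the new MAX_KEYLEN, which covers the largest geometry (btree_geo128, whose keys span 2 * LONG_PER_U64 words). A minimal standalone sketch of the pattern, with hypothetical names:

	#include <string.h>

	#define LONG_PER_U64 1			/* assumption: a 64-bit build */
	#define MAX_KEYLEN   (2 * LONG_PER_U64)	/* bounds the largest geometry */

	struct geo { int keylen; };		/* hypothetical mirror of btree_geo */

	/* Before: unsigned long key[geo->keylen];  -- a variable-length array.
	 * After:  a fixed-size array; only the first geo->keylen words are
	 * used, which is safe as long as keylen <= MAX_KEYLEN holds for
	 * every supported geometry. */
	static void use_key(const struct geo *geo, const unsigned long *src,
			    unsigned long *dst)
	{
		unsigned long key[MAX_KEYLEN];

		memcpy(key, src, geo->keylen * sizeof(*key));
		memcpy(dst, key, geo->keylen * sizeof(*key));
	}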
diff --git a/lib/bug.c b/lib/bug.c
index c1b0fad31b10..1077366f496b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		return BUG_TRAP_TYPE_NONE;
 
 	bug = find_bug(bugaddr);
+	if (!bug)
+		return BUG_TRAP_TYPE_NONE;
 
 	file = NULL;
 	line = 0;
@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 	if (file)
 		pr_crit("kernel BUG at %s:%u!\n", file, line);
 	else
-		pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
 			(void *)bugaddr);
 
 	return BUG_TRAP_TYPE_BUG;
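This file carries two independent fixes: find_bug() can return NULL when the trapping address has no bug-table entry, and the fallback printk swaps %p for %pB. A hedged aside on the format change: since v4.15 a plain %p prints a hashed value, whereas %pB decodes a backtrace-style address into symbol+offset, keeping the fallback message useful. The output strings below are examples only:

	/* Illustrative output difference (exact values are made up):
	 *   %p  -> "Kernel BUG at 000000006e5a62a9"            (hashed, opaque)
	 *   %pB -> "Kernel BUG at do_syscall_64+0x4c/0x120"    (symbol + offset)
	 */
	pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
		(void *)bugaddr);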
diff --git a/lib/ioremap.c b/lib/ioremap.c
index b808a390e4c3..54e5bbaa3200 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -91,7 +91,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 
 		if (ioremap_pmd_enabled() &&
 		    ((next - addr) == PMD_SIZE) &&
-		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+		    pmd_free_pte_page(pmd)) {
 			if (pmd_set_huge(pmd, phys_addr + addr, prot))
 				continue;
 		}
@@ -117,7 +118,8 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
 
 		if (ioremap_pud_enabled() &&
 		    ((next - addr) == PUD_SIZE) &&
-		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+		    pud_free_pmd_page(pud)) {
 			if (pud_set_huge(pud, phys_addr + addr, prot))
 				continue;
 		}
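Both hunks have the same shape: one more precondition is appended to the && chain, so a huge entry is installed only after any stale lower-level page table at that slot has been freed; short-circuit evaluation guarantees pmd_set_huge()/pud_set_huge() is never reached otherwise. A toy illustration of that condition-chain pattern, with all helper names hypothetical:

	#include <stdbool.h>
	#include <stdio.h>

	/* All three helpers are hypothetical stand-ins, not kernel APIs. */
	static bool huge_mappings_enabled(void) { return true; }
	static bool range_covers_whole_entry(void) { return true; }
	static bool freed_stale_lower_table(void) { return false; /* e.g. busy */ }

	int main(void)
	{
		/* Mirrors the fixed condition chain: the new final check gates
		 * the promotion, so a huge entry never overwrites a live table. */
		if (huge_mappings_enabled() &&
		    range_covers_whole_entry() &&
		    freed_stale_lower_table())
			puts("install huge mapping");
		else
			puts("fall back to small pages");
		return 0;
	}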
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 30e7dd88148b..9f96fa7bc000 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -322,6 +322,8 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
  * This function normally doesn't block and can be called from any context
  * but it may block if @confirm_kill is specified and @ref is in the
  * process of switching to atomic mode by percpu_ref_switch_to_atomic().
+ *
+ * There are no implied RCU grace periods between kill and release.
  */
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
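For orientation, a hedged usage sketch of this API; my_release and my_confirm are illustrative names, and the fragment assumes an enclosing object that holds the ref:

	/* The new doc sentence is the point: once the ref is killed, no RCU
	 * grace period is implied before my_release() can run, so a caller
	 * that needs one must arrange it explicitly (e.g. via call_rcu()). */
	static void my_release(struct percpu_ref *ref)
	{
		/* called when the count finally drops to zero */
	}

	static void my_confirm(struct percpu_ref *ref)
	{
		/* called once the ref has switched to atomic mode */
	}

		/* ... during setup ... */
		percpu_ref_init(&obj->ref, my_release, 0, GFP_KERNEL);
		/* ... on teardown ... */
		percpu_ref_kill_and_confirm(&obj->ref, my_confirm);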
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 3825c30aaa36..47de025b6245 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -506,8 +506,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		if (!key ||
 		    (ht->p.obj_cmpfn ?
 		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
-		     rhashtable_compare(&arg, rht_obj(ht, head))))
+		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
+			pprev = &head->next;
 			continue;
+		}
 
 		if (!ht->rhlist)
 			return rht_obj(ht, head);
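The added pprev update restores the standard back-pointer idiom for singly linked list traversal: the pointer-to-link must advance past every node that is skipped, or a later splice updates the wrong link. A self-contained illustration in plain C (not kernel code):

	#include <stddef.h>

	struct node { struct node *next; int key; };

	/* Returns the link slot that either points at the matching node or,
	 * if no match, at the end of the list -- the spot where an insert
	 * or unlink must be applied. */
	static struct node **find_slot(struct node **head, int key)
	{
		struct node **pprev = head;
		struct node *n;

		for (n = *head; n; n = n->next) {
			if (n->key == key)
				break;
			pprev = &n->next;	/* the step the bug fix restores */
		}
		return pprev;
	}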
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 2efb213716fa..3e9335493fe4 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5467,7 +5467,7 @@ static struct bpf_test tests[] = {
 	{
 		"BPF_MAXINSNS: Jump, gap, jump, ...",
 		{ },
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 #else
 		CLASSIC | FLAG_NO_DATA,
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e372b97eee13..0e5b7a61460b 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1141,7 +1141,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
 	mutex_lock(&reg_dev_mutex);
 
 	/* int should suffice for number of devices, test for wrap */
-	if (unlikely(num_test_devs + 1) < 0) {
+	if (num_test_devs + 1 == INT_MAX) {
 		pr_err("reached limit of number of test devices\n");
 		goto out;
 	}
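The old condition was a precedence bug: unlikely() wraps __builtin_expect(!!(x), 0), which evaluates to 0 or 1, so "unlikely(num_test_devs + 1) < 0" compared a 0-or-1 with 0 and could never be true. A minimal demonstration outside the kernel:

	#include <limits.h>
	#include <stdio.h>

	#define unlikely(x) __builtin_expect(!!(x), 0)	/* as in the kernel */

	int main(void)
	{
		int num_test_devs = INT_MAX - 1;

		/* Old check: !!(num_test_devs + 1) is 1, and 1 < 0 is false --
		 * the misplaced parenthesis made the wrap test unreachable. */
		printf("old: %d\n", unlikely(num_test_devs + 1) < 0);

		/* New check fires before the counter can reach INT_MAX. */
		printf("new: %d\n", num_test_devs + 1 == INT_MAX);
		return 0;
	}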
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 76d3667fdea2..f4000c137dbe 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -79,6 +79,21 @@ struct thread_data {
 	struct test_obj *objs;
 };
 
+static u32 my_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct test_obj_rhl *obj = data;
+
+	return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+}
+
+static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
+{
+	const struct test_obj_rhl *test_obj = obj;
+	const struct test_obj_val *val = arg->key;
+
+	return test_obj->value.id - val->id;
+}
+
 static struct rhashtable_params test_rht_params = {
 	.head_offset = offsetof(struct test_obj, node),
 	.key_offset = offsetof(struct test_obj, value),
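my_hashfn deliberately maps each id into one of ten buckets (id % 10, shifted past the hash bits rhashtable reserves internally), so the duplicate-insertion test further down can force collisions on demand. Worked out for the ids that test actually uses:

	#include <stdio.h>

	/* Worked example of the collision setup (shift omitted for clarity):
	 * ids 1 and 21 land in the same bucket as two different keys, while
	 * a second id 1 is a genuine duplicate within that bucket. */
	int main(void)
	{
		int ids[] = { 1, 21, 1 };

		for (int i = 0; i < 3; i++)
			printf("id %2d -> bucket %d\n", ids[i], ids[i] % 10);
		return 0;
	}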
@@ -87,6 +102,17 @@ static struct rhashtable_params test_rht_params = {
 	.nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
+static struct rhashtable_params test_rht_params_dup = {
+	.head_offset = offsetof(struct test_obj_rhl, list_node),
+	.key_offset = offsetof(struct test_obj_rhl, value),
+	.key_len = sizeof(struct test_obj_val),
+	.hashfn = jhash,
+	.obj_hashfn = my_hashfn,
+	.obj_cmpfn = my_cmpfn,
+	.nelem_hint = 128,
+	.automatic_shrinking = false,
+};
+
 static struct semaphore prestart_sem;
 static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
 
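These params drive an rhltable, the rhashtable variant that permits several objects per key. A simplified sketch of the two-level chaining the next hunk's print_ht() walks (the kernel definitions additionally mark the pointers __rcu):

	/* Within one bucket, entries with distinct keys are chained through
	 * rhead.next, while true duplicates of a single key hang off the
	 * rhlist-level next pointer -- hence the nested loops in print_ht(). */
	struct rhash_head  { struct rhash_head *next; };
	struct rhlist_head {
		struct rhash_head  rhead;	/* bucket-chain linkage */
		struct rhlist_head *next;	/* duplicates of this key */
	};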
@@ -465,6 +491,112 @@ static int __init test_rhashtable_max(struct test_obj *array,
 	return err;
 }
 
+static unsigned int __init print_ht(struct rhltable *rhlt)
+{
+	struct rhashtable *ht;
+	const struct bucket_table *tbl;
+	char buff[512] = "";
+	unsigned int i, cnt = 0;
+
+	ht = &rhlt->ht;
+	tbl = rht_dereference(ht->tbl, ht);
+	for (i = 0; i < tbl->size; i++) {
+		struct rhash_head *pos, *next;
+		struct test_obj_rhl *p;
+
+		pos = rht_dereference(tbl->buckets[i], ht);
+		next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
+
+		if (!rht_is_a_nulls(pos)) {
+			sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
+		}
+
+		while (!rht_is_a_nulls(pos)) {
+			struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
+			sprintf(buff, "%s[[", buff);
+			do {
+				pos = &list->rhead;
+				list = rht_dereference(list->next, ht);
+				p = rht_obj(ht, pos);
+
+				sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
+					list? ", " : " ");
+				cnt++;
+			} while (list);
+
+			pos = next,
+			next = !rht_is_a_nulls(pos) ?
+				rht_dereference(pos->next, ht) : NULL;
+
+			sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
+		}
+	}
+	printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+
+	return cnt;
+}
+
+static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
+				  int cnt, bool slow)
+{
+	struct rhltable rhlt;
+	unsigned int i, ret;
+	const char *key;
+	int err = 0;
+
+	err = rhltable_init(&rhlt, &test_rht_params_dup);
+	if (WARN_ON(err))
+		return err;
+
+	for (i = 0; i < cnt; i++) {
+		rhl_test_objects[i].value.tid = i;
+		key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+		key += test_rht_params_dup.key_offset;
+
+		if (slow) {
+			err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+							     &rhl_test_objects[i].list_node.rhead));
+			if (err == -EAGAIN)
+				err = 0;
+		} else
+			err = rhltable_insert(&rhlt,
+					      &rhl_test_objects[i].list_node,
+					      test_rht_params_dup);
+		if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
+			goto skip_print;
+	}
+
+	ret = print_ht(&rhlt);
+	WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
+
+skip_print:
+	rhltable_destroy(&rhlt);
+
+	return 0;
+}
+
+static int __init test_insert_duplicates_run(void)
+{
+	struct test_obj_rhl rhl_test_objects[3] = {};
+
+	pr_info("test inserting duplicates\n");
+
+	/* two different values that map to same bucket */
+	rhl_test_objects[0].value.id = 1;
+	rhl_test_objects[1].value.id = 21;
+
+	/* and another duplicate with same as [0] value
+	 * which will be second on the bucket list */
+	rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
+
+	test_insert_dup(rhl_test_objects, 2, false);
+	test_insert_dup(rhl_test_objects, 3, false);
+	test_insert_dup(rhl_test_objects, 2, true);
+	test_insert_dup(rhl_test_objects, 3, true);
+
+	return 0;
+}
+
 static int thread_lookup_test(struct thread_data *tdata)
 {
 	unsigned int entries = tdata->entries;
@@ -613,6 +745,8 @@ static int __init test_rht_init(void)
 	do_div(total_time, runs);
 	pr_info("Average test time: %llu\n", total_time);
 
+	test_insert_duplicates_run();
+
 	if (!tcount)
 		return 0;
 