Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      |   1
-rw-r--r--  lib/btree.c            |  10
-rw-r--r--  lib/bug.c              |   4
-rw-r--r--  lib/dma-debug.c        |  10
-rw-r--r--  lib/dma-direct.c       |   5
-rw-r--r--  lib/idr.c              |  15
-rw-r--r--  lib/ioremap.c          |   6
-rw-r--r--  lib/percpu-refcount.c  |   2
-rw-r--r--  lib/radix-tree.c       |   2
-rw-r--r--  lib/rhashtable.c       |   4
-rw-r--r--  lib/test_bpf.c         |   6
-rw-r--r--  lib/test_kmod.c        |   2
-rw-r--r--  lib/test_rhashtable.c  | 134
-rw-r--r--  lib/vsprintf.c         |   2
14 files changed, 177 insertions(+), 26 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6088408ef26c..64155e310a9f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1642,6 +1642,7 @@ config DMA_API_DEBUG
 
 menuconfig RUNTIME_TESTING_MENU
 	bool "Runtime Testing"
+	def_bool y
 
 if RUNTIME_TESTING_MENU
 
diff --git a/lib/btree.c b/lib/btree.c
index f93a945274af..590facba2c50 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -3,7 +3,7 @@
  *
  * As should be obvious for Linux kernel code, license is GPLv2
  *
- * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
+ * Copyright (c) 2007-2008 Joern Engel <joern@purestorage.com>
  * Bits and pieces stolen from Peter Zijlstra's code, which is
  * Copyright 2007, Red Hat Inc. Peter Zijlstra
  * GPLv2
@@ -76,6 +76,8 @@ struct btree_geo btree_geo128 = {
 };
 EXPORT_SYMBOL_GPL(btree_geo128);
 
+#define MAX_KEYLEN (2 * LONG_PER_U64)
+
 static struct kmem_cache *btree_cachep;
 
 void *btree_alloc(gfp_t gfp_mask, void *pool_data)
@@ -313,7 +315,7 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
 {
 	int i, height;
 	unsigned long *node, *oldnode;
-	unsigned long *retry_key = NULL, key[geo->keylen];
+	unsigned long *retry_key = NULL, key[MAX_KEYLEN];
 
 	if (keyzero(geo, __key))
 		return NULL;
@@ -639,8 +641,8 @@ EXPORT_SYMBOL_GPL(btree_remove);
 int btree_merge(struct btree_head *target, struct btree_head *victim,
 		struct btree_geo *geo, gfp_t gfp)
 {
-	unsigned long key[geo->keylen];
-	unsigned long dup[geo->keylen];
+	unsigned long key[MAX_KEYLEN];
+	unsigned long dup[MAX_KEYLEN];
 	void *val;
 	int err;
 
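The btree.c hunks replace variable-length stack arrays sized by geo->keylen with fixed buffers bounded by MAX_KEYLEN, the largest key length any supported geometry can need (btree_geo128 uses 2 * LONG_PER_U64 longs). A minimal userspace sketch of the same pattern, with illustrative names (clear_key, visit_key) and an assumed bound of 4 longs:

    /* Sketch only: bound the on-stack buffer by the worst case instead of a runtime value. */
    #define MAX_KEYLEN 4    /* assumed: 2 * LONG_PER_U64 with 32-bit longs, illustrative */

    static void clear_key(unsigned long *key, unsigned int keylen)
    {
    	unsigned int i;

    	for (i = 0; i < keylen; i++)
    		key[i] = 0;
    }

    static void visit_key(unsigned int keylen)
    {
    	/* before: unsigned long key[keylen];  -- a VLA, frame size unknown until run time */
    	unsigned long key[MAX_KEYLEN];          /* after: worst-case size known at build time */

    	clear_key(key, keylen);                 /* callers guarantee keylen <= MAX_KEYLEN */
    }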
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 		return BUG_TRAP_TYPE_NONE;
 
 	bug = find_bug(bugaddr);
+	if (!bug)
+		return BUG_TRAP_TYPE_NONE;
 
 	file = NULL;
 	line = 0;
@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 	if (file)
 		pr_crit("kernel BUG at %s:%u!\n", file, line);
 	else
-		pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n",
+		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
 			(void *)bugaddr);
 
 	return BUG_TRAP_TYPE_BUG;
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 1b34d210452c..7f5cdc1e6b29 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1491,12 +1491,12 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 	if (unlikely(virt == NULL))
 		return;
 
-	entry = dma_entry_alloc();
-	if (!entry)
+	/* handle vmalloc and linear addresses */
+	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
 		return;
 
-	/* handle vmalloc and linear addresses */
-	if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+	entry = dma_entry_alloc();
+	if (!entry)
 		return;
 
 	entry->type = dma_debug_coherent;
@@ -1528,7 +1528,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 	};
 
 	/* handle vmalloc and linear addresses */
-	if (!is_vmalloc_addr(virt) && !virt_to_page(virt))
+	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
 		return;
 
 	if (is_vmalloc_addr(virt))
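The debug_dma_alloc_coherent() hunk reorders the checks so the address is validated before a debug entry is allocated; with the old order, an early return on an untrackable address leaked the freshly allocated entry. It also switches to virt_addr_valid(), the proper predicate for "is this in the linear map". A minimal sketch of the check-before-allocate ordering, using hypothetical names (track_entry, track_alloc, addr_is_trackable), not the kernel API:

    #include <stdbool.h>
    #include <stdlib.h>

    struct track_entry { const void *virt; size_t size; };

    static bool addr_is_trackable(const void *virt)
    {
    	return virt != NULL;    /* stand-in for the is_vmalloc_addr()/virt_addr_valid() test */
    }

    static struct track_entry *track_alloc(const void *virt, size_t size)
    {
    	struct track_entry *entry;

    	if (!addr_is_trackable(virt))   /* reject first: nothing allocated yet, nothing can leak */
    		return NULL;

    	entry = malloc(sizeof(*entry)); /* allocate bookkeeping only for valid addresses */
    	if (!entry)
    		return NULL;

    	entry->virt = virt;
    	entry->size = size;
    	return entry;
    }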
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index 40b1f92f2214..c9e8e21cb334 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -84,6 +84,10 @@ again:
 	return page_address(page);
 }
 
+/*
+ * NOTE: this function must never look at the dma_addr argument, because we want
+ * to be able to use it as a helper for iommu implementations as well.
+ */
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
@@ -152,5 +156,6 @@ const struct dma_map_ops dma_direct_ops = {
 	.map_sg = dma_direct_map_sg,
 	.dma_supported = dma_direct_supported,
 	.mapping_error = dma_direct_mapping_error,
+	.is_phys = 1,
 };
 EXPORT_SYMBOL(dma_direct_ops);
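The new comment pins down a contract: the free path must derive everything it needs from cpu_addr and size and never rely on dma_addr, so the same helper can also sit underneath an IOMMU implementation that owns the DMA address space. A hedged sketch of a helper honouring that contract; example_direct_free is hypothetical and far simpler than the real dma_direct_free():

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    static void example_direct_free(struct device *dev, size_t size, void *cpu_addr,
    				dma_addr_t dma_addr, unsigned long attrs)
    {
    	unsigned int order = get_order(size);

    	(void)dma_addr;                          /* deliberately unused, per the comment above */
    	free_pages((unsigned long)cpu_addr, order);
    }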
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -36,8 +36,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
 {
 	struct radix_tree_iter iter;
 	void __rcu **slot;
-	int base = idr->idr_base;
-	int id = *nextid;
+	unsigned int base = idr->idr_base;
+	unsigned int id = *nextid;
 
 	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
 		return -EINVAL;
@@ -204,10 +204,11 @@ int idr_for_each(const struct idr *idr,
 
 	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
 		int ret;
+		unsigned long id = iter.index + base;
 
-		if (WARN_ON_ONCE(iter.index > INT_MAX))
+		if (WARN_ON_ONCE(id > INT_MAX))
 			break;
-		ret = fn(iter.index + base, rcu_dereference_raw(*slot), data);
+		ret = fn(id, rcu_dereference_raw(*slot), data);
 		if (ret)
 			return ret;
 	}
@@ -230,8 +231,8 @@ void *idr_get_next(struct idr *idr, int *nextid)
 {
 	struct radix_tree_iter iter;
 	void __rcu **slot;
-	int base = idr->idr_base;
-	int id = *nextid;
+	unsigned long base = idr->idr_base;
+	unsigned long id = *nextid;
 
 	id = (id < base) ? 0 : id - base;
 	slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
@@ -431,7 +432,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
 		bitmap = this_cpu_xchg(ida_bitmap, NULL);
 		if (!bitmap)
 			return -EAGAIN;
-		memset(bitmap, 0, sizeof(*bitmap));
 		bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 		rcu_assign_pointer(*slot, bitmap);
 	}
@@ -464,7 +464,6 @@ int ida_get_new_above(struct ida *ida, int start, int *id)
 		bitmap = this_cpu_xchg(ida_bitmap, NULL);
 		if (!bitmap)
 			return -EAGAIN;
-		memset(bitmap, 0, sizeof(*bitmap));
 		__set_bit(bit, bitmap->bitmap);
 		radix_tree_iter_replace(root, &iter, slot, bitmap);
 	}
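The idr hunks move the id arithmetic from int to unsigned types so that adding idr_base to an index near INT_MAX cannot overflow a signed integer, and the range check is done on the already-computed id. A small userspace illustration of why the unsigned form is preferable; the values are chosen only to demonstrate the range check:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned long index = (unsigned long)INT_MAX - 1;   /* radix-tree index near the top */
    	unsigned long base = 10;                            /* a base as set via idr_init_base() */
    	unsigned long id = index + base;                    /* well defined in unsigned arithmetic */

    	if (id > INT_MAX)                                   /* explicit check replaces silent signed wrap */
    		printf("id %lu does not fit callers expecting a non-negative int\n", id);
    	return 0;
    }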
diff --git a/lib/ioremap.c b/lib/ioremap.c
index b808a390e4c3..54e5bbaa3200 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -91,7 +91,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 
 		if (ioremap_pmd_enabled() &&
 		    ((next - addr) == PMD_SIZE) &&
-		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
+		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
+		    pmd_free_pte_page(pmd)) {
 			if (pmd_set_huge(pmd, phys_addr + addr, prot))
 				continue;
 		}
@@ -117,7 +118,8 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
 
 		if (ioremap_pud_enabled() &&
 		    ((next - addr) == PUD_SIZE) &&
-		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
+		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
+		    pud_free_pmd_page(pud)) {
 			if (pud_set_huge(pud, phys_addr + addr, prot))
 				continue;
 		}
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 30e7dd88148b..9f96fa7bc000 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -322,6 +322,8 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
  * This function normally doesn't block and can be called from any context
  * but it may block if @confirm_kill is specified and @ref is in the
  * process of switching to atomic mode by percpu_ref_switch_to_atomic().
+ *
+ * There are no implied RCU grace periods between kill and release.
  */
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill)
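The added kerneldoc sentence is a warning to callers: percpu_ref inserts no RCU grace period between the kill and the release callback, so a release function that frees an object which RCU readers may still reference has to arrange its own grace period. A hedged sketch of what that looks like for a hypothetical user; struct my_obj and my_obj_release are illustrative, not kernel code:

    #include <linux/kernel.h>
    #include <linux/percpu-refcount.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_obj {
    	struct percpu_ref ref;
    	struct rcu_head rcu;
    };

    static void my_obj_release(struct percpu_ref *ref)
    {
    	struct my_obj *obj = container_of(ref, struct my_obj, ref);

    	kfree_rcu(obj, rcu);    /* grace period supplied by the user, not by percpu_ref */
    }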
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0a7ae3288a24..8e00138d593f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -2125,7 +2125,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
 	preempt_enable();
 
 	if (!this_cpu_read(ida_bitmap)) {
-		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
+		struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
 		if (!bitmap)
 			return 0;
 		if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
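Switching ida_pre_get() to kzalloc() hands out bitmaps that are already zeroed, which appears to be what allows the two memset() calls to be dropped from ida_get_new_above() in the idr.c hunks above: every bitmap installed there comes fresh from this preallocation. A one-function userspace analogy; bitmap_prealloc is illustrative:

    #include <stdlib.h>

    static unsigned long *bitmap_prealloc(size_t nlongs)
    {
    	return calloc(nlongs, sizeof(unsigned long));   /* arrives zeroed, like kzalloc() */
    }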
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 3825c30aaa36..47de025b6245 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -506,8 +506,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 		if (!key ||
 		    (ht->p.obj_cmpfn ?
 		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
-		     rhashtable_compare(&arg, rht_obj(ht, head))))
+		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
+			pprev = &head->next;
 			continue;
+		}
 
 		if (!ht->rhlist)
 			return rht_obj(ht, head);
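The rhashtable fix keeps pprev advancing past bucket entries that do not match, so that when a match or insertion point is found, *pprev really is the link preceding it; without the update, later list surgery would splice at the head of the chain instead of at the right position. A generic, self-contained sketch of the pprev idiom on a plain singly linked list; struct node and find_slot are illustrative:

    struct node { int key; struct node *next; };

    static struct node **find_slot(struct node **head, int key)
    {
    	struct node **pprev = head;
    	struct node *pos;

    	for (pos = *head; pos; pos = pos->next) {
    		if (pos->key != key) {
    			pprev = &pos->next;   /* advance pprev past every skipped entry */
    			continue;
    		}
    		break;                        /* *pprev now links to the matching node */
    	}
    	return pprev;                         /* if no match, this is where to append */
    }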
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index b4e22345963f..3e9335493fe4 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -24,10 +24,11 @@
 #include <linux/if_vlan.h>
 #include <linux/random.h>
 #include <linux/highmem.h>
+#include <linux/sched.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS 3
-#define MAX_TESTRUNS 10000
+#define MAX_TESTRUNS 1000
 #define MAX_DATA 128
 #define MAX_INSNS 512
 #define MAX_K 0xffffFFFF
@@ -5466,7 +5467,7 @@ static struct bpf_test tests[] = {
 	{
 		"BPF_MAXINSNS: Jump, gap, jump, ...",
 		{ },
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
 		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
 #else
 		CLASSIC | FLAG_NO_DATA,
@@ -6582,6 +6583,7 @@ static __init int test_bpf(void)
 		struct bpf_prog *fp;
 		int err;
 
+		cond_resched();
 		if (exclude_test(i))
 			continue;
 
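The cond_resched() added to the test loop is the usual courtesy for long-running initcalls: each iteration offers the scheduler a chance to run so the module does not monopolise the CPU or trip soft-lockup detection, and lowering MAX_TESTRUNS shortens each subtest for the same reason. A minimal sketch of the pattern; run_many and its callback are illustrative:

    #include <linux/sched.h>

    static void run_many(int n, void (*one_test)(int))
    {
    	int i;

    	for (i = 0; i < n; i++) {
    		cond_resched();   /* give other tasks a chance between iterations */
    		one_test(i);
    	}
    }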
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e372b97eee13..0e5b7a61460b 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1141,7 +1141,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
 	mutex_lock(&reg_dev_mutex);
 
 	/* int should suffice for number of devices, test for wrap */
-	if (unlikely(num_test_devs + 1) < 0) {
+	if (num_test_devs + 1 == INT_MAX) {
 		pr_err("reached limit of number of test devices\n");
 		goto out;
 	}
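The old guard parenthesized the wrong expression: unlikely() yields 0 or 1, so comparing its result with < 0 could never fire, and testing for a negative sum after the fact would rely on signed overflow anyway. The fix tests the counter against INT_MAX before it is incremented. A userspace illustration; the exact bound in the second check is illustrative, not the kernel's:

    #include <limits.h>
    #include <stdio.h>

    #define unlikely(x) __builtin_expect(!!(x), 0)

    int main(void)
    {
    	int num_test_devs = INT_MAX - 1;

    	if (unlikely(num_test_devs + 1) < 0)    /* unlikely() returns 0 or 1, so this is never true */
    		puts("old check: unreachable");

    	if (num_test_devs >= INT_MAX - 1)       /* guard on the counter itself, before incrementing */
    		puts("refusing to register another test device");

    	return 0;
    }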
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 76d3667fdea2..f4000c137dbe 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -79,6 +79,21 @@ struct thread_data {
 	struct test_obj *objs;
 };
 
+static u32 my_hashfn(const void *data, u32 len, u32 seed)
+{
+	const struct test_obj_rhl *obj = data;
+
+	return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
+}
+
+static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
+{
+	const struct test_obj_rhl *test_obj = obj;
+	const struct test_obj_val *val = arg->key;
+
+	return test_obj->value.id - val->id;
+}
+
 static struct rhashtable_params test_rht_params = {
 	.head_offset = offsetof(struct test_obj, node),
 	.key_offset = offsetof(struct test_obj, value),
@@ -87,6 +102,17 @@ static struct rhashtable_params test_rht_params = {
 	.nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
+static struct rhashtable_params test_rht_params_dup = {
+	.head_offset = offsetof(struct test_obj_rhl, list_node),
+	.key_offset = offsetof(struct test_obj_rhl, value),
+	.key_len = sizeof(struct test_obj_val),
+	.hashfn = jhash,
+	.obj_hashfn = my_hashfn,
+	.obj_cmpfn = my_cmpfn,
+	.nelem_hint = 128,
+	.automatic_shrinking = false,
+};
+
 static struct semaphore prestart_sem;
 static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
 
@@ -465,6 +491,112 @@ static int __init test_rhashtable_max(struct test_obj *array,
 	return err;
 }
 
+static unsigned int __init print_ht(struct rhltable *rhlt)
+{
+	struct rhashtable *ht;
+	const struct bucket_table *tbl;
+	char buff[512] = "";
+	unsigned int i, cnt = 0;
+
+	ht = &rhlt->ht;
+	tbl = rht_dereference(ht->tbl, ht);
+	for (i = 0; i < tbl->size; i++) {
+		struct rhash_head *pos, *next;
+		struct test_obj_rhl *p;
+
+		pos = rht_dereference(tbl->buckets[i], ht);
+		next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
+
+		if (!rht_is_a_nulls(pos)) {
+			sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
+		}
+
+		while (!rht_is_a_nulls(pos)) {
+			struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
+			sprintf(buff, "%s[[", buff);
+			do {
+				pos = &list->rhead;
+				list = rht_dereference(list->next, ht);
+				p = rht_obj(ht, pos);
+
+				sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
+					list? ", " : " ");
+				cnt++;
+			} while (list);
+
+			pos = next,
+			next = !rht_is_a_nulls(pos) ?
+				rht_dereference(pos->next, ht) : NULL;
+
+			sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
+		}
+	}
+	printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
+
+	return cnt;
+}
+
+static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
+				  int cnt, bool slow)
+{
+	struct rhltable rhlt;
+	unsigned int i, ret;
+	const char *key;
+	int err = 0;
+
+	err = rhltable_init(&rhlt, &test_rht_params_dup);
+	if (WARN_ON(err))
+		return err;
+
+	for (i = 0; i < cnt; i++) {
+		rhl_test_objects[i].value.tid = i;
+		key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+		key += test_rht_params_dup.key_offset;
+
+		if (slow) {
+			err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+							     &rhl_test_objects[i].list_node.rhead));
+			if (err == -EAGAIN)
+				err = 0;
+		} else
+			err = rhltable_insert(&rhlt,
+					      &rhl_test_objects[i].list_node,
+					      test_rht_params_dup);
+		if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
+			goto skip_print;
+	}
+
+	ret = print_ht(&rhlt);
+	WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
+
+skip_print:
+	rhltable_destroy(&rhlt);
+
+	return 0;
+}
+
+static int __init test_insert_duplicates_run(void)
+{
+	struct test_obj_rhl rhl_test_objects[3] = {};
+
+	pr_info("test inserting duplicates\n");
+
+	/* two different values that map to same bucket */
+	rhl_test_objects[0].value.id = 1;
+	rhl_test_objects[1].value.id = 21;
+
+	/* and another duplicate with same as [0] value
+	 * which will be second on the bucket list */
+	rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
+
+	test_insert_dup(rhl_test_objects, 2, false);
+	test_insert_dup(rhl_test_objects, 3, false);
+	test_insert_dup(rhl_test_objects, 2, true);
+	test_insert_dup(rhl_test_objects, 3, true);
+
+	return 0;
+}
+
 static int thread_lookup_test(struct thread_data *tdata)
 {
 	unsigned int entries = tdata->entries;
@@ -613,6 +745,8 @@ static int __init test_rht_init(void)
 	do_div(total_time, runs);
 	pr_info("Average test time: %llu\n", total_time);
 
+	test_insert_duplicates_run();
+
 	if (!tcount)
 		return 0;
 
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 77ee6ced11b1..d7a708f82559 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1849,7 +1849,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 {
 	const int default_width = 2 * sizeof(void *);
 
-	if (!ptr && *fmt != 'K') {
+	if (!ptr && *fmt != 'K' && *fmt != 'x') {
 		/*
 		 * Print (null) with the same width as a pointer so it makes
 		 * tabular output look nice.
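Adding 'x' to the exception list means a NULL pointer printed with the unhashed %px specifier is rendered as the raw zero value rather than the "(null)" placeholder, mirroring the existing special case for %pK. A hedged kernel-style usage sketch; report_ptr is hypothetical:

    #include <linux/printk.h>

    static void report_ptr(const void *p)
    {
    	pr_debug("raw=%px hashed=%p\n", p, p);   /* with p == NULL, %px now prints a zero address */
    }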
