diff options
author | Matthew Wilcox <mawilcox@microsoft.com> | 2016-12-14 18:09:31 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-14 19:04:10 -0500 |
commit | e8de4340767dd002978c285e3adddaeda8ac652c (patch) | |
tree | 3e14e4a3b7b0874e4e824b7873558a1017d2e368 /tools | |
parent | bbe9d71f2c545398987a6fea5090a6ca76f4a8dc (diff) |
radix-tree: ensure counts are initialised
radix_tree_join() was freeing nodes with a non-zero ->exceptional count,
and radix_tree_split() wasn't zeroing ->exceptional when it allocated
the new node. Fix this by making all callers of radix_tree_node_alloc()
pass in the new counts (and some other always-initialised fields), which
will prevent the problem recurring if in future we decide to do
something similar.
Link: http://lkml.kernel.org/r/1481667692-14500-3-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'tools')
-rw-r--r-- | tools/testing/radix-tree/multiorder.c | 45 |
1 file changed, 41 insertions(+), 4 deletions(-)
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index 08b4e16dc86f..f79812a5e070 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c | |||
@@ -355,7 +355,7 @@ void multiorder_tagged_iteration(void) | |||
355 | item_kill_tree(&tree); | 355 | item_kill_tree(&tree); |
356 | } | 356 | } |
357 | 357 | ||
358 | static void __multiorder_join(unsigned long index, | 358 | static void multiorder_join1(unsigned long index, |
359 | unsigned order1, unsigned order2) | 359 | unsigned order1, unsigned order2) |
360 | { | 360 | { |
361 | unsigned long loc; | 361 | unsigned long loc; |
@@ -373,7 +373,7 @@ static void __multiorder_join(unsigned long index, | |||
373 | item_kill_tree(&tree); | 373 | item_kill_tree(&tree); |
374 | } | 374 | } |
375 | 375 | ||
376 | static void __multiorder_join2(unsigned order1, unsigned order2) | 376 | static void multiorder_join2(unsigned order1, unsigned order2) |
377 | { | 377 | { |
378 | RADIX_TREE(tree, GFP_KERNEL); | 378 | RADIX_TREE(tree, GFP_KERNEL); |
379 | struct radix_tree_node *node; | 379 | struct radix_tree_node *node; |
@@ -393,6 +393,39 @@ static void __multiorder_join2(unsigned order1, unsigned order2) | |||
393 | item_kill_tree(&tree); | 393 | item_kill_tree(&tree); |
394 | } | 394 | } |
395 | 395 | ||
396 | /* | ||
397 | * This test revealed an accounting bug for exceptional entries at one point. | ||
398 | * Nodes were being freed back into the pool with an elevated exception count | ||
399 | * by radix_tree_join() and then radix_tree_split() was failing to zero the | ||
400 | * count of exceptional entries. | ||
401 | */ | ||
402 | static void multiorder_join3(unsigned int order) | ||
403 | { | ||
404 | RADIX_TREE(tree, GFP_KERNEL); | ||
405 | struct radix_tree_node *node; | ||
406 | void **slot; | ||
407 | struct radix_tree_iter iter; | ||
408 | unsigned long i; | ||
409 | |||
410 | for (i = 0; i < (1 << order); i++) { | ||
411 | radix_tree_insert(&tree, i, (void *)0x12UL); | ||
412 | } | ||
413 | |||
414 | radix_tree_join(&tree, 0, order, (void *)0x16UL); | ||
415 | rcu_barrier(); | ||
416 | |||
417 | radix_tree_split(&tree, 0, 0); | ||
418 | |||
419 | radix_tree_for_each_slot(slot, &tree, &iter, 0) { | ||
420 | radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL); | ||
421 | } | ||
422 | |||
423 | __radix_tree_lookup(&tree, 0, &node, NULL); | ||
424 | assert(node->exceptional == node->count); | ||
425 | |||
426 | item_kill_tree(&tree); | ||
427 | } | ||
428 | |||
396 | static void multiorder_join(void) | 429 | static void multiorder_join(void) |
397 | { | 430 | { |
398 | int i, j, idx; | 431 | int i, j, idx; |
@@ -400,16 +433,20 @@ static void multiorder_join(void) | |||
400 | for (idx = 0; idx < 1024; idx = idx * 2 + 3) { | 433 | for (idx = 0; idx < 1024; idx = idx * 2 + 3) { |
401 | for (i = 1; i < 15; i++) { | 434 | for (i = 1; i < 15; i++) { |
402 | for (j = 0; j < i; j++) { | 435 | for (j = 0; j < i; j++) { |
403 | __multiorder_join(idx, i, j); | 436 | multiorder_join1(idx, i, j); |
404 | } | 437 | } |
405 | } | 438 | } |
406 | } | 439 | } |
407 | 440 | ||
408 | for (i = 1; i < 15; i++) { | 441 | for (i = 1; i < 15; i++) { |
409 | for (j = 0; j < i; j++) { | 442 | for (j = 0; j < i; j++) { |
410 | __multiorder_join2(i, j); | 443 | multiorder_join2(i, j); |
411 | } | 444 | } |
412 | } | 445 | } |
446 | |||
447 | for (i = 3; i < 10; i++) { | ||
448 | multiorder_join3(i); | ||
449 | } | ||
413 | } | 450 | } |
414 | 451 | ||
415 | static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc) | 452 | static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc) |