author		Matthew Wilcox <mawilcox@microsoft.com>	2016-12-14 18:09:31 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 19:04:10 -0500
commit		e8de4340767dd002978c285e3adddaeda8ac652c (patch)
tree		3e14e4a3b7b0874e4e824b7873558a1017d2e368
parent		bbe9d71f2c545398987a6fea5090a6ca76f4a8dc (diff)
radix-tree: ensure counts are initialised
radix_tree_join() was freeing nodes with a non-zero ->exceptional count,
and radix_tree_split() wasn't zeroing ->exceptional when it allocated the
new node.  Fix this by making all callers of radix_tree_node_alloc() pass
in the new counts (and some other always-initialised fields), which will
prevent the problem recurring if in future we decide to do something
similar.

Link: http://lkml.kernel.org/r/1481667692-14500-3-git-send-email-mawilcox@linuxonhyperv.com
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	lib/radix-tree.c			41
-rw-r--r--	tools/testing/radix-tree/multiorder.c	45
2 files changed, 61 insertions(+), 25 deletions(-)
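The shape of the fix: field initialisation moves into the allocator, so no
caller can forget to zero ->count or ->exceptional. Below is a minimal
standalone sketch of that pattern, not the kernel code; the struct, the
node_alloc() name, and the malloc()-based backing store are illustrative
stand-ins for struct radix_tree_node and radix_tree_node_alloc().

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct radix_tree_node. */
struct node {
	struct node *parent;
	unsigned int shift;
	unsigned int offset;
	unsigned int count;		/* populated slots */
	unsigned int exceptional;	/* non-pointer entries */
};

/*
 * Every always-initialised field is a parameter, mirroring the new
 * radix_tree_node_alloc() signature: a caller cannot leave one stale.
 */
static struct node *node_alloc(struct node *parent, unsigned int shift,
			       unsigned int offset, unsigned int count,
			       unsigned int exceptional)
{
	struct node *ret = malloc(sizeof(*ret));

	if (ret) {
		ret->parent = parent;
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->exceptional = exceptional;
	}
	return ret;
}

int main(void)
{
	/* A new root holding one slot, as radix_tree_extend() now does. */
	struct node *root = node_alloc(NULL, 6, 0, 1, 0);

	if (!root)
		return 1;
	printf("count=%u exceptional=%u\n", root->count, root->exceptional);
	free(root);
	return 0;
}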
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index d09c17dd60ae..0019aca0f328 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -288,7 +288,10 @@ static void radix_tree_dump(struct radix_tree_root *root)
  * that the caller has pinned this thread of control to the current CPU.
  */
 static struct radix_tree_node *
-radix_tree_node_alloc(struct radix_tree_root *root)
+radix_tree_node_alloc(struct radix_tree_root *root,
+			struct radix_tree_node *parent,
+			unsigned int shift, unsigned int offset,
+			unsigned int count, unsigned int exceptional)
 {
 	struct radix_tree_node *ret = NULL;
 	gfp_t gfp_mask = root_gfp_mask(root);
@@ -333,6 +336,13 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 out:
 	BUG_ON(radix_tree_is_internal_node(ret));
+	if (ret) {
+		ret->parent = parent;
+		ret->shift = shift;
+		ret->offset = offset;
+		ret->count = count;
+		ret->exceptional = exceptional;
+	}
 	return ret;
 }
 
@@ -538,8 +548,8 @@ static int radix_tree_extend(struct radix_tree_root *root,
 		goto out;
 
 	do {
-		struct radix_tree_node *node = radix_tree_node_alloc(root);
-
+		struct radix_tree_node *node = radix_tree_node_alloc(root,
+							NULL, shift, 0, 1, 0);
 		if (!node)
 			return -ENOMEM;
 
@@ -550,16 +560,11 @@
 	}
 
 	BUG_ON(shift > BITS_PER_LONG);
-	node->shift = shift;
-	node->offset = 0;
-	node->count = 1;
-	node->parent = NULL;
 	if (radix_tree_is_internal_node(slot)) {
 		entry_to_node(slot)->parent = node;
-	} else {
+	} else if (radix_tree_exceptional_entry(slot)) {
 		/* Moving an exceptional root->rnode to a node */
-		if (radix_tree_exceptional_entry(slot))
-			node->exceptional = 1;
+		node->exceptional = 1;
 	}
 	node->slots[0] = slot;
 	slot = node_to_entry(node);
@@ -712,14 +717,10 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
 		shift -= RADIX_TREE_MAP_SHIFT;
 		if (child == NULL) {
 			/* Have to add a child node. */
-			child = radix_tree_node_alloc(root);
+			child = radix_tree_node_alloc(root, node, shift,
+							offset, 0, 0);
 			if (!child)
 				return -ENOMEM;
-			child->shift = shift;
-			child->offset = offset;
-			child->count = 0;
-			child->exceptional = 0;
-			child->parent = node;
 			rcu_assign_pointer(*slot, node_to_entry(child));
 			if (node)
 				node->count++;
@@ -1209,13 +1210,11 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
 
 	for (;;) {
 		if (node->shift > order) {
-			child = radix_tree_node_alloc(root);
+			child = radix_tree_node_alloc(root, node,
+					node->shift - RADIX_TREE_MAP_SHIFT,
+					offset, 0, 0);
 			if (!child)
 				goto nomem;
-			child->shift = node->shift - RADIX_TREE_MAP_SHIFT;
-			child->offset = offset;
-			child->count = 0;
-			child->parent = node;
 			if (node != parent) {
 				node->count++;
 				node->slots[offset] = node_to_entry(child);
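For context on what ->exceptional counts: an exceptional entry is a value
stored in a slot that is distinguished from an ordinary pointer by its low
bits (bit 1 in this era's radix tree, which is why the test below uses
values like 0x12 and 0x16). A standalone sketch of that tagging idea, with
illustrative names rather than the kernel's macros:

#include <assert.h>
#include <stdio.h>

#define EXCEPTIONAL_ENTRY 2UL	/* stands in for RADIX_TREE_EXCEPTIONAL_ENTRY */

/* Pack a small integer into an exceptional (non-pointer) entry. */
static void *make_exceptional(unsigned long value)
{
	return (void *)((value << 2) | EXCEPTIONAL_ENTRY);
}

static int is_exceptional(const void *entry)
{
	return ((unsigned long)entry & EXCEPTIONAL_ENTRY) != 0;
}

int main(void)
{
	void *entry = make_exceptional(5);
	int data = 0;

	assert(is_exceptional(entry));
	assert(!is_exceptional(&data));	/* real pointers are word-aligned */
	printf("stored value = %lu\n", (unsigned long)entry >> 2);
	return 0;
}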
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 08b4e16dc86f..f79812a5e070 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -355,7 +355,7 @@ void multiorder_tagged_iteration(void)
 	item_kill_tree(&tree);
 }
 
-static void __multiorder_join(unsigned long index,
+static void multiorder_join1(unsigned long index,
 		unsigned order1, unsigned order2)
 {
 	unsigned long loc;
@@ -373,7 +373,7 @@ static void __multiorder_join(unsigned long index,
 	item_kill_tree(&tree);
 }
 
-static void __multiorder_join2(unsigned order1, unsigned order2)
+static void multiorder_join2(unsigned order1, unsigned order2)
 {
 	RADIX_TREE(tree, GFP_KERNEL);
 	struct radix_tree_node *node;
@@ -393,6 +393,39 @@ static void __multiorder_join2(unsigned order1, unsigned order2)
 	item_kill_tree(&tree);
 }
 
+/*
+ * This test revealed an accounting bug for exceptional entries at one point.
+ * Nodes were being freed back into the pool with an elevated exception count
+ * by radix_tree_join() and then radix_tree_split() was failing to zero the
+ * count of exceptional entries.
+ */
+static void multiorder_join3(unsigned int order)
+{
+	RADIX_TREE(tree, GFP_KERNEL);
+	struct radix_tree_node *node;
+	void **slot;
+	struct radix_tree_iter iter;
+	unsigned long i;
+
+	for (i = 0; i < (1 << order); i++) {
+		radix_tree_insert(&tree, i, (void *)0x12UL);
+	}
+
+	radix_tree_join(&tree, 0, order, (void *)0x16UL);
+	rcu_barrier();
+
+	radix_tree_split(&tree, 0, 0);
+
+	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL);
+	}
+
+	__radix_tree_lookup(&tree, 0, &node, NULL);
+	assert(node->exceptional == node->count);
+
+	item_kill_tree(&tree);
+}
+
 static void multiorder_join(void)
 {
 	int i, j, idx;
@@ -400,16 +433,20 @@ static void multiorder_join(void)
 	for (idx = 0; idx < 1024; idx = idx * 2 + 3) {
 		for (i = 1; i < 15; i++) {
 			for (j = 0; j < i; j++) {
-				__multiorder_join(idx, i, j);
+				multiorder_join1(idx, i, j);
 			}
 		}
 	}
 
 	for (i = 1; i < 15; i++) {
 		for (j = 0; j < i; j++) {
-			__multiorder_join2(i, j);
+			multiorder_join2(i, j);
 		}
 	}
+
+	for (i = 3; i < 10; i++) {
+		multiorder_join3(i);
+	}
 }
 
 static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
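Why a stale count was dangerous at all: radix-tree nodes are recycled
through a cache and preload pool, so a node freed by radix_tree_join()
with ->exceptional still elevated could be handed back out carrying that
stale value. The toy free list below (purely illustrative, not the
kernel's slab/preload machinery) shows the failure mode and how
initialising inside the allocator closes it:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next_free;		/* pool linkage while free */
	unsigned int count;
	unsigned int exceptional;
};

static struct node *pool;		/* free list of recycled nodes */

static struct node *pool_alloc(unsigned int count, unsigned int exceptional)
{
	struct node *n = pool;

	if (n)
		pool = n->next_free;
	else
		n = malloc(sizeof(*n));
	if (n) {
		/* The fix's pattern: initialise here, not in each caller. */
		n->count = count;
		n->exceptional = exceptional;
	}
	return n;
}

static void pool_free(struct node *n)
{
	n->next_free = pool;
	pool = n;
}

int main(void)
{
	struct node *a = pool_alloc(1, 1);	/* held an exceptional entry */
	struct node *b;

	pool_free(a);		/* returned to the pool with state intact */

	/*
	 * b recycles a's memory. Without the initialisation in
	 * pool_alloc(), b->exceptional would still read 1 here.
	 */
	b = pool_alloc(0, 0);
	printf("count=%u exceptional=%u\n", b->count, b->exceptional);
	free(b);
	return 0;
}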