author    Davidlohr Bueso <dave@stgolabs.net>    2017-09-08 19:14:36 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-09-08 21:26:48 -0400
commit    cd9e61ed1eebbcd5dfad59475d41ec58d9b64b6a (patch)
tree      c1d649fce5c7a7be138856e172c2304424c1a2e9 /lib/rbtree.c
parent    c32ee3d9abd284b4fcaacc250b101f93829c7bae (diff)
rbtree: cache leftmost node internally
Patch series "rbtree: Cache leftmost node internally", v4.

A series extending rbtrees to internally cache the leftmost node, so that we can have a fast overlap-check optimization for all interval tree users[1]. The benefits of this series are:

(i) Unify users that do internal leftmost node caching.
(ii) Optimize all interval tree users.
(iii) Convert at least two new users (epoll and procfs) to the new interface.

This patch (of 16):

Red-black tree semantics imply that nodes with smaller or greater (or equal for duplicates) keys are always to the left and right, respectively. For the kernel this is extremely evident when considering our rb_first() semantics. Enabling O(1) lookups of the smallest node in the tree can save a good chunk of cycles by not having to walk down the tree each time. To this end there are a few core users that explicitly do this, such as the scheduler and rtmutexes. There is also the desire for interval trees to have this optimization, allowing faster overlap checking.

This patch introduces a new 'struct rb_root_cached', which is just the regular root with a cached pointer to the leftmost node. The reason the regular rb_root was not extended, instead of adding a new structure, is that this gives the user the choice between memory footprint and actual tree performance. The new wrappers on top of the regular rb_root calls are:

- rb_first_cached(cached_root) -- a fast replacement for rb_first.
- rb_insert_color_cached(node, cached_root, new)
- rb_erase_cached(node, cached_root)

In addition, augmented cached interfaces are also added for basic insertion and deletion operations, which becomes important for the interval tree changes. With the exception of the inserts, which add a bool for updating the new leftmost, the interfaces are kept the same. To this end, porting rbtree users to the cached version becomes trivial, and keeping current rbtree semantics for users that don't care about the optimization incurs zero overhead.

Link: http://lkml.kernel.org/r/20170719014603.19029-2-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
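For context (not part of the patch itself), a minimal usage sketch of the cached interface follows. The my_node/my_insert/my_first names and the key field are made up for illustration; rb_root_cached, rb_insert_color_cached(), rb_erase_cached() and rb_first_cached() are the interfaces added by this series, and RB_ROOT_CACHED is assumed to be the initializer added by the accompanying rbtree.h change.

#include <linux/rbtree.h>

struct my_node {
        struct rb_node node;
        unsigned long key;
};

static struct rb_root_cached my_tree = RB_ROOT_CACHED;

static void my_insert(struct my_node *new)
{
        struct rb_node **link = &my_tree.rb_root.rb_node, *parent = NULL;
        bool leftmost = true;

        /* Usual rbtree descent; remember whether we ever went right. */
        while (*link) {
                struct my_node *entry = rb_entry(*link, struct my_node, node);

                parent = *link;
                if (new->key < entry->key) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = false;       /* not the new smallest key */
                }
        }

        rb_link_node(&new->node, parent, link);
        /* The extra bool lets the core update the cached leftmost pointer. */
        rb_insert_color_cached(&new->node, &my_tree, leftmost);
}

static void my_erase(struct my_node *victim)
{
        rb_erase_cached(&victim->node, &my_tree);
}

static struct my_node *my_first(void)
{
        struct rb_node *l = rb_first_cached(&my_tree);  /* O(1), no tree walk */

        return l ? rb_entry(l, struct my_node, node) : NULL;
}

Users that do not need the optimization keep using plain rb_root and pay nothing extra, which is the footprint-versus-performance trade-off described above.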
Diffstat (limited to 'lib/rbtree.c')
-rw-r--r--   lib/rbtree.c   34
1 file changed, 29 insertions(+), 5 deletions(-)
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4ba2828a67c0..d102d9d2ffaa 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -95,10 +95,14 @@ __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
 
 static __always_inline void
 __rb_insert(struct rb_node *node, struct rb_root *root,
+            bool newleft, struct rb_node **leftmost,
             void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
         struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
 
+        if (newleft)
+                *leftmost = node;
+
         while (true) {
                 /*
                  * Loop invariant: node is red
@@ -434,19 +438,38 @@ static const struct rb_augment_callbacks dummy_callbacks = {
 
 void rb_insert_color(struct rb_node *node, struct rb_root *root)
 {
-        __rb_insert(node, root, dummy_rotate);
+        __rb_insert(node, root, false, NULL, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_insert_color);
 
 void rb_erase(struct rb_node *node, struct rb_root *root)
 {
         struct rb_node *rebalance;
-        rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
+        rebalance = __rb_erase_augmented(node, root,
+                                         NULL, &dummy_callbacks);
         if (rebalance)
                 ____rb_erase_color(rebalance, root, dummy_rotate);
 }
 EXPORT_SYMBOL(rb_erase);
 
+void rb_insert_color_cached(struct rb_node *node,
+                            struct rb_root_cached *root, bool leftmost)
+{
+        __rb_insert(node, &root->rb_root, leftmost,
+                    &root->rb_leftmost, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_insert_color_cached);
+
+void rb_erase_cached(struct rb_node *node, struct rb_root_cached *root)
+{
+        struct rb_node *rebalance;
+        rebalance = __rb_erase_augmented(node, &root->rb_root,
+                                         &root->rb_leftmost, &dummy_callbacks);
+        if (rebalance)
+                ____rb_erase_color(rebalance, &root->rb_root, dummy_rotate);
+}
+EXPORT_SYMBOL(rb_erase_cached);
+
 /*
  * Augmented rbtree manipulation functions.
  *
@@ -455,9 +478,10 @@ EXPORT_SYMBOL(rb_erase);
  */
 
 void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+                           bool newleft, struct rb_node **leftmost,
         void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
 {
-        __rb_insert(node, root, augment_rotate);
+        __rb_insert(node, root, newleft, leftmost, augment_rotate);
 }
 EXPORT_SYMBOL(__rb_insert_augmented);
 
@@ -502,7 +526,7 @@ struct rb_node *rb_next(const struct rb_node *node)
          * as we can.
          */
         if (node->rb_right) {
-                node = node->rb_right; 
+                node = node->rb_right;
                 while (node->rb_left)
                         node=node->rb_left;
                 return (struct rb_node *)node;
@@ -534,7 +558,7 @@ struct rb_node *rb_prev(const struct rb_node *node)
          * as we can.
          */
         if (node->rb_left) {
-                node = node->rb_left; 
+                node = node->rb_left;
                 while (node->rb_right)
                         node=node->rb_right;
                 return (struct rb_node *)node;