path: root/arch/x86/mm/pat_rbtree.c
author    Peter Zijlstra <peterz@infradead.org>  2010-05-29 09:31:43 -0400
committer Ingo Molnar <mingo@elte.hu>            2010-07-05 08:43:50 -0400
commit    b945d6b2554d550fe95caadc61e521c0ad71fb9c
tree      0b76cdb978bead82188de40cae6d24bd88d71b7d  /arch/x86/mm/pat_rbtree.c
parent    d596043d71ff0d7b3d0bead19b1d68c55f003093
rbtree: Undo augmented trees performance damage and regression
Reimplement augmented RB-trees without sprinkling extra branches all over the RB-tree code (which lives in the scheduler hot path).

This approach is 'borrowed' from Fabio's BFQ implementation and relies on traversing the rebalance path after the RB-tree op to correct the heap property for insertion/removal and make up for the damage done by the tree rotations.

For insertion the rebalance path is trivially the one from the new node upwards to the root; for removal it is the one from the deepest node in the path from the to-be-removed node that will still be around after the removal.

[ This patch also fixes a video driver regression reported by Ali Gholami Rudi: memtype->subtree_max_end was updated incorrectly. ]

Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Acked-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Ali Gholami Rudi <ali@rudi.ir>
Cc: Fabio Checconi <fabio@gandalf.sssup.it>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <1275414172.27810.27961.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
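To make the "traverse the rebalance path" idea concrete, here is a minimal sketch of the upward walk for the insertion case. It is written against the callback shape visible in the diff below (an rb_node plus an opaque data pointer), but it is only an illustration of the technique, not the lib/rbtree.c helper this patch adds; the names augment_path and rb_augment_f are used here for the example.

#include <linux/rbtree.h>

/* callback type along the lines the new API uses */
typedef void (*rb_augment_f)(struct rb_node *nd, void *data);

/*
 * Illustrative only: after rb_insert_color() has rebalanced the
 * tree, recompute the augmented value from the new node up to the
 * root.  A rotation can also leave the *sibling* of a path node
 * with stale data, so refresh it before stepping up.
 */
static void augment_path(struct rb_node *node, rb_augment_f func, void *data)
{
	struct rb_node *parent;

	while (node) {
		func(node, data);		/* recompute from children */
		parent = rb_parent(node);
		if (parent) {
			/* a rotation may have dirtied the other child */
			if (node == parent->rb_left && parent->rb_right)
				func(parent->rb_right, data);
			else if (node == parent->rb_right && parent->rb_left)
				func(parent->rb_left, data);
		}
		node = parent;			/* walk towards the root */
	}
}

For removal the same walk applies, except that it must start from the deepest node that survives the erase (the node rb_erase() splices into the victim's place), which is why the erase path in the diff below brackets rb_erase() with rb_augment_erase_begin()/rb_augment_erase_end().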
Diffstat (limited to 'arch/x86/mm/pat_rbtree.c')
-rw-r--r--  arch/x86/mm/pat_rbtree.c | 34
1 file changed, 6 insertions(+), 28 deletions(-)
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index f20eeec85a86..8acaddd0fb21 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -34,8 +34,7 @@
  * memtype_lock protects the rbtree.
  */
 
-static void memtype_rb_augment_cb(struct rb_node *node);
-static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb);
+static struct rb_root memtype_rbroot = RB_ROOT;
 
 static int is_node_overlap(struct memtype *node, u64 start, u64 end)
 {
@@ -56,7 +55,7 @@ static u64 get_subtree_max_end(struct rb_node *node)
 }
 
 /* Update 'subtree_max_end' for a node, based on node and its children */
-static void update_node_max_end(struct rb_node *node)
+static void memtype_rb_augment_cb(struct rb_node *node, void *__unused)
 {
 	struct memtype *data;
 	u64 max_end, child_max_end;
@@ -78,25 +77,6 @@ static void update_node_max_end(struct rb_node *node)
 	data->subtree_max_end = max_end;
 }
 
-/* Update 'subtree_max_end' for a node and all its ancestors */
-static void update_path_max_end(struct rb_node *node)
-{
-	u64 old_max_end, new_max_end;
-
-	while (node) {
-		struct memtype *data = container_of(node, struct memtype, rb);
-
-		old_max_end = data->subtree_max_end;
-		update_node_max_end(node);
-		new_max_end = data->subtree_max_end;
-
-		if (new_max_end == old_max_end)
-			break;
-
-		node = rb_parent(node);
-	}
-}
-
 /* Find the first (lowest start addr) overlapping range from rb tree */
 static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
 				u64 start, u64 end)
@@ -190,12 +170,6 @@ failure:
 	return -EBUSY;
 }
 
-static void memtype_rb_augment_cb(struct rb_node *node)
-{
-	if (node)
-		update_path_max_end(node);
-}
-
 static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
 {
 	struct rb_node **node = &(root->rb_node);
@@ -213,6 +187,7 @@ static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
 
 	rb_link_node(&newdata->rb, parent, node);
 	rb_insert_color(&newdata->rb, root);
+	rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL);
 }
 
 int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
@@ -234,13 +209,16 @@ int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
 
 struct memtype *rbt_memtype_erase(u64 start, u64 end)
 {
+	struct rb_node *deepest;
 	struct memtype *data;
 
 	data = memtype_rb_exact_match(&memtype_rbroot, start, end);
 	if (!data)
 		goto out;
 
+	deepest = rb_augment_erase_begin(&data->rb);
 	rb_erase(&data->rb, &memtype_rbroot);
+	rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL);
 out:
 	return data;
 }
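For context on what the callback maintains: subtree_max_end caches the largest end address found anywhere in a node's subtree, which lets an overlap lookup prune whole subtrees. The following is a simplified sketch in the spirit of this file's memtype_rb_lowest_match() (which this patch leaves untouched); it assumes, as the surrounding code does, that struct memtype carries start, end and subtree_max_end fields, and the helper name lowest_overlap is made up for the example.

/*
 * Sketch: find the overlapping range with the lowest start address.
 * Correct only if subtree_max_end is up to date, i.e. if the augment
 * callback has run after every insert and erase.
 */
static struct memtype *lowest_overlap(struct rb_node *node, u64 start, u64 end)
{
	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		if (get_subtree_max_end(node->rb_left) > start) {
			/* some left-subtree range ends past 'start':
			 * the lowest overlap, if any, is on the left */
			node = node->rb_left;
		} else if (is_node_overlap(data, start, end)) {
			return data;	/* lowest-starting overlap */
		} else if (start >= data->start) {
			/* everything to the left ends too early */
			node = node->rb_right;
		} else {
			break;		/* no overlap exists */
		}
	}
	return NULL;
}

A stale subtree_max_end makes the pruning test in the first branch unreliable, so lookups can miss genuinely overlapping ranges; that is the kind of misbehaviour behind the regression noted in the commit message above.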