aboutsummaryrefslogtreecommitdiffstats
path: root/Documentation/rbtree.txt
diff options
context:
space:
mode:
authorDmitry Torokhov <dmitry.torokhov@gmail.com>2012-10-30 03:09:18 -0400
committerDmitry Torokhov <dmitry.torokhov@gmail.com>2012-10-30 03:09:18 -0400
commit68fe0f0a6dcd2ac1ace5da3647a6d1cf0f4d2fea (patch)
tree69d855c2e46d42ef8f23680cf3a1e6ff38de9ef4 /Documentation/rbtree.txt
parent88fd449e734a4264347e12b8ff74ccb33a9b9a35 (diff)
parent8f0d8163b50e01f398b14bcd4dc039ac5ab18d64 (diff)
Merge tag 'v3.7-rc3' into for-linus to sync up with recent USB changes
Diffstat (limited to 'Documentation/rbtree.txt')
-rw-r--r--Documentation/rbtree.txt209
1 files changed, 173 insertions, 36 deletions
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index 8d32d85a523..61b6c48871a 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -193,24 +193,55 @@ Example:
193Support for Augmented rbtrees 193Support for Augmented rbtrees
194----------------------------- 194-----------------------------
195 195
196Augmented rbtree is an rbtree with "some" additional data stored in each node. 196Augmented rbtree is an rbtree with "some" additional data stored in
197This data can be used to augment some new functionality to rbtree. 197each node, where the additional data for node N must be a function of
198Augmented rbtree is an optional feature built on top of basic rbtree 198the contents of all nodes in the subtree rooted at N. This data can
199infrastructure. An rbtree user who wants this feature will have to call the 199be used to augment some new functionality to rbtree. Augmented rbtree
200augmentation functions with the user provided augmentation callback 200is an optional feature built on top of basic rbtree infrastructure.
201when inserting and erasing nodes. 201An rbtree user who wants this feature will have to call the augmentation
202 202functions with the user provided augmentation callback when inserting
203On insertion, the user must call rb_augment_insert() once the new node is in 203and erasing nodes.
204place. This will cause the augmentation function callback to be called for 204
205each node between the new node and the root which has been affected by the 205C files implementing augmented rbtree manipulation must include
206insertion. 206<linux/rbtree_augmented.h> instead of <linus/rbtree.h>. Note that
207 207linux/rbtree_augmented.h exposes some rbtree implementations details
208When erasing a node, the user must call rb_augment_erase_begin() first to 208you are not expected to rely on; please stick to the documented APIs
209retrieve the deepest node on the rebalance path. Then, after erasing the 209there and do not include <linux/rbtree_augmented.h> from header files
210original node, the user must call rb_augment_erase_end() with the deepest 210either so as to minimize chances of your users accidentally relying on
211node found earlier. This will cause the augmentation function to be called 211such implementation details.
212for each affected node between the deepest node and the root. 212
213 213On insertion, the user must update the augmented information on the path
214leading to the inserted node, then call rb_link_node() as usual and
215rb_augment_inserted() instead of the usual rb_insert_color() call.
216If rb_augment_inserted() rebalances the rbtree, it will callback into
217a user provided function to update the augmented information on the
218affected subtrees.
219
220When erasing a node, the user must call rb_erase_augmented() instead of
221rb_erase(). rb_erase_augmented() calls back into user provided functions
222to update the augmented information on affected subtrees.
223
224In both cases, the callbacks are provided through struct rb_augment_callbacks.
2253 callbacks must be defined:
226
227- A propagation callback, which updates the augmented value for a given
228 node and its ancestors, up to a given stop point (or NULL to update
229 all the way to the root).
230
231- A copy callback, which copies the augmented value for a given subtree
232 to a newly assigned subtree root.
233
234- A tree rotation callback, which copies the augmented value for a given
235 subtree to a newly assigned subtree root AND recomputes the augmented
236 information for the former subtree root.
237
238The compiled code for rb_erase_augmented() may inline the propagation and
239copy callbacks, which results in a large function, so each augmented rbtree
240user should have a single rb_erase_augmented() call site in order to limit
241compiled code size.
242
243
244Sample usage:
214 245
215Interval tree is an example of augmented rb tree. Reference - 246Interval tree is an example of augmented rb tree. Reference -
216"Introduction to Algorithms" by Cormen, Leiserson, Rivest and Stein. 247"Introduction to Algorithms" by Cormen, Leiserson, Rivest and Stein.
@@ -230,26 +261,132 @@ and its immediate children. And this will be used in O(log n) lookup
230for lowest match (lowest start address among all possible matches) 261for lowest match (lowest start address among all possible matches)
231with something like: 262with something like:
232 263
233find_lowest_match(lo, hi, node) 264struct interval_tree_node *
265interval_tree_first_match(struct rb_root *root,
266 unsigned long start, unsigned long last)
234{ 267{
235 lowest_match = NULL; 268 struct interval_tree_node *node;
236 while (node) { 269
237 if (max_hi(node->left) > lo) { 270 if (!root->rb_node)
238 // Lowest overlap if any must be on left side 271 return NULL;
239 node = node->left; 272 node = rb_entry(root->rb_node, struct interval_tree_node, rb);
240 } else if (overlap(lo, hi, node)) { 273
241 lowest_match = node; 274 while (true) {
242 break; 275 if (node->rb.rb_left) {
243 } else if (lo > node->lo) { 276 struct interval_tree_node *left =
244 // Lowest overlap if any must be on right side 277 rb_entry(node->rb.rb_left,
245 node = node->right; 278 struct interval_tree_node, rb);
246 } else { 279 if (left->__subtree_last >= start) {
247 break; 280 /*
281 * Some nodes in left subtree satisfy Cond2.
282 * Iterate to find the leftmost such node N.
283 * If it also satisfies Cond1, that's the match
284 * we are looking for. Otherwise, there is no
285 * matching interval as nodes to the right of N
286 * can't satisfy Cond1 either.
287 */
288 node = left;
289 continue;
290 }
248 } 291 }
292 if (node->start <= last) { /* Cond1 */
293 if (node->last >= start) /* Cond2 */
294 return node; /* node is leftmost match */
295 if (node->rb.rb_right) {
296 node = rb_entry(node->rb.rb_right,
297 struct interval_tree_node, rb);
298 if (node->__subtree_last >= start)
299 continue;
300 }
301 }
302 return NULL; /* No match */
303 }
304}
305
306Insertion/removal are defined using the following augmented callbacks:
307
308static inline unsigned long
309compute_subtree_last(struct interval_tree_node *node)
310{
311 unsigned long max = node->last, subtree_last;
312 if (node->rb.rb_left) {
313 subtree_last = rb_entry(node->rb.rb_left,
314 struct interval_tree_node, rb)->__subtree_last;
315 if (max < subtree_last)
316 max = subtree_last;
317 }
318 if (node->rb.rb_right) {
319 subtree_last = rb_entry(node->rb.rb_right,
320 struct interval_tree_node, rb)->__subtree_last;
321 if (max < subtree_last)
322 max = subtree_last;
323 }
324 return max;
325}
326
327static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
328{
329 while (rb != stop) {
330 struct interval_tree_node *node =
331 rb_entry(rb, struct interval_tree_node, rb);
332 unsigned long subtree_last = compute_subtree_last(node);
333 if (node->__subtree_last == subtree_last)
334 break;
335 node->__subtree_last = subtree_last;
336 rb = rb_parent(&node->rb);
249 } 337 }
250 return lowest_match;
251} 338}
252 339
253Finding exact match will be to first find lowest match and then to follow 340static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
254successor nodes looking for exact match, until the start of a node is beyond 341{
255the hi value we are looking for. 342 struct interval_tree_node *old =
343 rb_entry(rb_old, struct interval_tree_node, rb);
344 struct interval_tree_node *new =
345 rb_entry(rb_new, struct interval_tree_node, rb);
346
347 new->__subtree_last = old->__subtree_last;
348}
349
350static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
351{
352 struct interval_tree_node *old =
353 rb_entry(rb_old, struct interval_tree_node, rb);
354 struct interval_tree_node *new =
355 rb_entry(rb_new, struct interval_tree_node, rb);
356
357 new->__subtree_last = old->__subtree_last;
358 old->__subtree_last = compute_subtree_last(old);
359}
360
361static const struct rb_augment_callbacks augment_callbacks = {
362 augment_propagate, augment_copy, augment_rotate
363};
364
365void interval_tree_insert(struct interval_tree_node *node,
366 struct rb_root *root)
367{
368 struct rb_node **link = &root->rb_node, *rb_parent = NULL;
369 unsigned long start = node->start, last = node->last;
370 struct interval_tree_node *parent;
371
372 while (*link) {
373 rb_parent = *link;
374 parent = rb_entry(rb_parent, struct interval_tree_node, rb);
375 if (parent->__subtree_last < last)
376 parent->__subtree_last = last;
377 if (start < parent->start)
378 link = &parent->rb.rb_left;
379 else
380 link = &parent->rb.rb_right;
381 }
382
383 node->__subtree_last = last;
384 rb_link_node(&node->rb, rb_parent, link);
385 rb_insert_augmented(&node->rb, root, &augment_callbacks);
386}
387
388void interval_tree_remove(struct interval_tree_node *node,
389 struct rb_root *root)
390{
391 rb_erase_augmented(&node->rb, root, &augment_callbacks);
392}