diff options
| -rw-r--r-- | arch/x86/lib/usercopy.c | 2 | ||||
| -rw-r--r-- | kernel/events/core.c | 8 | ||||
| -rw-r--r-- | kernel/events/internal.h | 10 | ||||
| -rw-r--r-- | kernel/events/ring_buffer.c | 27 | ||||
| -rw-r--r-- | tools/include/linux/compiler.h | 58 | ||||
| -rw-r--r-- | tools/include/linux/export.h | 10 | ||||
| -rw-r--r-- | tools/include/linux/rbtree.h | 104 | ||||
| -rw-r--r-- | tools/include/linux/rbtree_augmented.h | 245 | ||||
| -rw-r--r-- | tools/lib/rbtree.c | 548 | ||||
| -rw-r--r-- | tools/perf/MANIFEST | 6 | ||||
| -rw-r--r-- | tools/perf/util/Build | 2 | ||||
| -rw-r--r-- | tools/perf/util/include/linux/rbtree.h | 16 | ||||
| -rw-r--r-- | tools/perf/util/include/linux/rbtree_augmented.h | 2 |
13 files changed, 995 insertions, 43 deletions
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index ddf9ecb53cc3..e342586db6e4 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c | |||
| @@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
| 20 | unsigned long ret; | 20 | unsigned long ret; |
| 21 | 21 | ||
| 22 | if (__range_not_ok(from, n, TASK_SIZE)) | 22 | if (__range_not_ok(from, n, TASK_SIZE)) |
| 23 | return 0; | 23 | return n; |
| 24 | 24 | ||
| 25 | /* | 25 | /* |
| 26 | * Even though this function is typically called from NMI/IRQ context | 26 | * Even though this function is typically called from NMI/IRQ context |
diff --git a/kernel/events/core.c b/kernel/events/core.c index e965cfae4207..d3dae3419b99 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -4358,14 +4358,6 @@ static void ring_buffer_wakeup(struct perf_event *event) | |||
| 4358 | rcu_read_unlock(); | 4358 | rcu_read_unlock(); |
| 4359 | } | 4359 | } |
| 4360 | 4360 | ||
| 4361 | static void rb_free_rcu(struct rcu_head *rcu_head) | ||
| 4362 | { | ||
| 4363 | struct ring_buffer *rb; | ||
| 4364 | |||
| 4365 | rb = container_of(rcu_head, struct ring_buffer, rcu_head); | ||
| 4366 | rb_free(rb); | ||
| 4367 | } | ||
| 4368 | |||
| 4369 | struct ring_buffer *ring_buffer_get(struct perf_event *event) | 4361 | struct ring_buffer *ring_buffer_get(struct perf_event *event) |
| 4370 | { | 4362 | { |
| 4371 | struct ring_buffer *rb; | 4363 | struct ring_buffer *rb; |
diff --git a/kernel/events/internal.h b/kernel/events/internal.h index 2deb24c7a40d..2bbad9c1274c 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | struct ring_buffer { | 11 | struct ring_buffer { |
| 12 | atomic_t refcount; | 12 | atomic_t refcount; |
| 13 | struct rcu_head rcu_head; | 13 | struct rcu_head rcu_head; |
| 14 | struct irq_work irq_work; | ||
| 14 | #ifdef CONFIG_PERF_USE_VMALLOC | 15 | #ifdef CONFIG_PERF_USE_VMALLOC |
| 15 | struct work_struct work; | 16 | struct work_struct work; |
| 16 | int page_order; /* allocation order */ | 17 | int page_order; /* allocation order */ |
| @@ -55,6 +56,15 @@ struct ring_buffer { | |||
| 55 | }; | 56 | }; |
| 56 | 57 | ||
| 57 | extern void rb_free(struct ring_buffer *rb); | 58 | extern void rb_free(struct ring_buffer *rb); |
| 59 | |||
| 60 | static inline void rb_free_rcu(struct rcu_head *rcu_head) | ||
| 61 | { | ||
| 62 | struct ring_buffer *rb; | ||
| 63 | |||
| 64 | rb = container_of(rcu_head, struct ring_buffer, rcu_head); | ||
| 65 | rb_free(rb); | ||
| 66 | } | ||
| 67 | |||
| 58 | extern struct ring_buffer * | 68 | extern struct ring_buffer * |
| 59 | rb_alloc(int nr_pages, long watermark, int cpu, int flags); | 69 | rb_alloc(int nr_pages, long watermark, int cpu, int flags); |
| 60 | extern void perf_event_wakeup(struct perf_event *event); | 70 | extern void perf_event_wakeup(struct perf_event *event); |
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 96472824a752..b2be01b1aa9d 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
| @@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle) | |||
| 221 | rcu_read_unlock(); | 221 | rcu_read_unlock(); |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | static void rb_irq_work(struct irq_work *work); | ||
| 225 | |||
| 224 | static void | 226 | static void |
| 225 | ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) | 227 | ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) |
| 226 | { | 228 | { |
| @@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags) | |||
| 241 | 243 | ||
| 242 | INIT_LIST_HEAD(&rb->event_list); | 244 | INIT_LIST_HEAD(&rb->event_list); |
| 243 | spin_lock_init(&rb->event_lock); | 245 | spin_lock_init(&rb->event_lock); |
| 246 | init_irq_work(&rb->irq_work, rb_irq_work); | ||
| 247 | } | ||
| 248 | |||
| 249 | static void ring_buffer_put_async(struct ring_buffer *rb) | ||
| 250 | { | ||
| 251 | if (!atomic_dec_and_test(&rb->refcount)) | ||
| 252 | return; | ||
| 253 | |||
| 254 | rb->rcu_head.next = (void *)rb; | ||
| 255 | irq_work_queue(&rb->irq_work); | ||
| 244 | } | 256 | } |
| 245 | 257 | ||
| 246 | /* | 258 | /* |
| @@ -319,7 +331,7 @@ err_put: | |||
| 319 | rb_free_aux(rb); | 331 | rb_free_aux(rb); |
| 320 | 332 | ||
| 321 | err: | 333 | err: |
| 322 | ring_buffer_put(rb); | 334 | ring_buffer_put_async(rb); |
| 323 | handle->event = NULL; | 335 | handle->event = NULL; |
| 324 | 336 | ||
| 325 | return NULL; | 337 | return NULL; |
| @@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size, | |||
| 370 | 382 | ||
| 371 | local_set(&rb->aux_nest, 0); | 383 | local_set(&rb->aux_nest, 0); |
| 372 | rb_free_aux(rb); | 384 | rb_free_aux(rb); |
| 373 | ring_buffer_put(rb); | 385 | ring_buffer_put_async(rb); |
| 374 | } | 386 | } |
| 375 | 387 | ||
| 376 | /* | 388 | /* |
| @@ -557,7 +569,18 @@ static void __rb_free_aux(struct ring_buffer *rb) | |||
| 557 | void rb_free_aux(struct ring_buffer *rb) | 569 | void rb_free_aux(struct ring_buffer *rb) |
| 558 | { | 570 | { |
| 559 | if (atomic_dec_and_test(&rb->aux_refcount)) | 571 | if (atomic_dec_and_test(&rb->aux_refcount)) |
| 572 | irq_work_queue(&rb->irq_work); | ||
| 573 | } | ||
| 574 | |||
| 575 | static void rb_irq_work(struct irq_work *work) | ||
| 576 | { | ||
| 577 | struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work); | ||
| 578 | |||
| 579 | if (!atomic_read(&rb->aux_refcount)) | ||
| 560 | __rb_free_aux(rb); | 580 | __rb_free_aux(rb); |
| 581 | |||
| 582 | if (rb->rcu_head.next == (void *)rb) | ||
| 583 | call_rcu(&rb->rcu_head, rb_free_rcu); | ||
| 561 | } | 584 | } |
| 562 | 585 | ||
| 563 | #ifndef CONFIG_PERF_USE_VMALLOC | 586 | #ifndef CONFIG_PERF_USE_VMALLOC |
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index f0e72674c52d..9098083869c8 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h | |||
| @@ -41,4 +41,62 @@ | |||
| 41 | 41 | ||
| 42 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) | 42 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
| 43 | 43 | ||
| 44 | #include <linux/types.h> | ||
| 45 | |||
| 46 | static __always_inline void __read_once_size(const volatile void *p, void *res, int size) | ||
| 47 | { | ||
| 48 | switch (size) { | ||
| 49 | case 1: *(__u8 *)res = *(volatile __u8 *)p; break; | ||
| 50 | case 2: *(__u16 *)res = *(volatile __u16 *)p; break; | ||
| 51 | case 4: *(__u32 *)res = *(volatile __u32 *)p; break; | ||
| 52 | case 8: *(__u64 *)res = *(volatile __u64 *)p; break; | ||
| 53 | default: | ||
| 54 | barrier(); | ||
| 55 | __builtin_memcpy((void *)res, (const void *)p, size); | ||
| 56 | barrier(); | ||
| 57 | } | ||
| 58 | } | ||
| 59 | |||
| 60 | static __always_inline void __write_once_size(volatile void *p, void *res, int size) | ||
| 61 | { | ||
| 62 | switch (size) { | ||
| 63 | case 1: *(volatile __u8 *)p = *(__u8 *)res; break; | ||
| 64 | case 2: *(volatile __u16 *)p = *(__u16 *)res; break; | ||
| 65 | case 4: *(volatile __u32 *)p = *(__u32 *)res; break; | ||
| 66 | case 8: *(volatile __u64 *)p = *(__u64 *)res; break; | ||
| 67 | default: | ||
| 68 | barrier(); | ||
| 69 | __builtin_memcpy((void *)p, (const void *)res, size); | ||
| 70 | barrier(); | ||
| 71 | } | ||
| 72 | } | ||
| 73 | |||
| 74 | /* | ||
| 75 | * Prevent the compiler from merging or refetching reads or writes. The | ||
| 76 | * compiler is also forbidden from reordering successive instances of | ||
| 77 | * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the | ||
| 78 | * compiler is aware of some particular ordering. One way to make the | ||
| 79 | * compiler aware of ordering is to put the two invocations of READ_ONCE, | ||
| 80 | * WRITE_ONCE or ACCESS_ONCE() in different C statements. | ||
| 81 | * | ||
| 82 | * In contrast to ACCESS_ONCE these two macros will also work on aggregate | ||
| 83 | * data types like structs or unions. If the size of the accessed data | ||
| 84 | * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) | ||
| 85 | * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a | ||
| 86 | * compile-time warning. | ||
| 87 | * | ||
| 88 | * Their two major use cases are: (1) Mediating communication between | ||
| 89 | * process-level code and irq/NMI handlers, all running on the same CPU, | ||
| 90 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise | ||
| 91 | * mutilate accesses that either do not require ordering or that interact | ||
| 92 | * with an explicit memory barrier or atomic instruction that provides the | ||
| 93 | * required ordering. | ||
| 94 | */ | ||
| 95 | |||
| 96 | #define READ_ONCE(x) \ | ||
| 97 | ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) | ||
| 98 | |||
| 99 | #define WRITE_ONCE(x, val) \ | ||
| 100 | ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) | ||
| 101 | |||
| 44 | #endif /* _TOOLS_LINUX_COMPILER_H */ | 102 | #endif /* _TOOLS_LINUX_COMPILER_H */ |
diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h deleted file mode 100644 index d07e586b9ba0..000000000000 --- a/tools/include/linux/export.h +++ /dev/null | |||
| @@ -1,10 +0,0 @@ | |||
| 1 | #ifndef _TOOLS_LINUX_EXPORT_H_ | ||
| 2 | #define _TOOLS_LINUX_EXPORT_H_ | ||
| 3 | |||
| 4 | #define EXPORT_SYMBOL(sym) | ||
| 5 | #define EXPORT_SYMBOL_GPL(sym) | ||
| 6 | #define EXPORT_SYMBOL_GPL_FUTURE(sym) | ||
| 7 | #define EXPORT_UNUSED_SYMBOL(sym) | ||
| 8 | #define EXPORT_UNUSED_SYMBOL_GPL(sym) | ||
| 9 | |||
| 10 | #endif | ||
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h new file mode 100644 index 000000000000..112582253dd0 --- /dev/null +++ b/tools/include/linux/rbtree.h | |||
| @@ -0,0 +1,104 @@ | |||
| 1 | /* | ||
| 2 | Red Black Trees | ||
| 3 | (C) 1999 Andrea Arcangeli <andrea@suse.de> | ||
| 4 | |||
| 5 | This program is free software; you can redistribute it and/or modify | ||
| 6 | it under the terms of the GNU General Public License as published by | ||
| 7 | the Free Software Foundation; either version 2 of the License, or | ||
| 8 | (at your option) any later version. | ||
| 9 | |||
| 10 | This program is distributed in the hope that it will be useful, | ||
| 11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | GNU General Public License for more details. | ||
| 14 | |||
| 15 | You should have received a copy of the GNU General Public License | ||
| 16 | along with this program; if not, write to the Free Software | ||
| 17 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 18 | |||
| 19 | linux/include/linux/rbtree.h | ||
| 20 | |||
| 21 | To use rbtrees you'll have to implement your own insert and search cores. | ||
| 22 | This will avoid us to use callbacks and to drop drammatically performances. | ||
| 23 | I know it's not the cleaner way, but in C (not in C++) to get | ||
| 24 | performances and genericity... | ||
| 25 | |||
| 26 | See Documentation/rbtree.txt for documentation and samples. | ||
| 27 | */ | ||
| 28 | |||
| 29 | #ifndef __TOOLS_LINUX_PERF_RBTREE_H | ||
| 30 | #define __TOOLS_LINUX_PERF_RBTREE_H | ||
| 31 | |||
| 32 | #include <linux/kernel.h> | ||
| 33 | #include <linux/stddef.h> | ||
| 34 | |||
| 35 | struct rb_node { | ||
| 36 | unsigned long __rb_parent_color; | ||
| 37 | struct rb_node *rb_right; | ||
| 38 | struct rb_node *rb_left; | ||
| 39 | } __attribute__((aligned(sizeof(long)))); | ||
| 40 | /* The alignment might seem pointless, but allegedly CRIS needs it */ | ||
| 41 | |||
| 42 | struct rb_root { | ||
| 43 | struct rb_node *rb_node; | ||
| 44 | }; | ||
| 45 | |||
| 46 | |||
| 47 | #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) | ||
| 48 | |||
| 49 | #define RB_ROOT (struct rb_root) { NULL, } | ||
| 50 | #define rb_entry(ptr, type, member) container_of(ptr, type, member) | ||
| 51 | |||
| 52 | #define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) | ||
| 53 | |||
| 54 | /* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ | ||
| 55 | #define RB_EMPTY_NODE(node) \ | ||
| 56 | ((node)->__rb_parent_color == (unsigned long)(node)) | ||
| 57 | #define RB_CLEAR_NODE(node) \ | ||
| 58 | ((node)->__rb_parent_color = (unsigned long)(node)) | ||
| 59 | |||
| 60 | |||
| 61 | extern void rb_insert_color(struct rb_node *, struct rb_root *); | ||
| 62 | extern void rb_erase(struct rb_node *, struct rb_root *); | ||
| 63 | |||
| 64 | |||
| 65 | /* Find logical next and previous nodes in a tree */ | ||
| 66 | extern struct rb_node *rb_next(const struct rb_node *); | ||
| 67 | extern struct rb_node *rb_prev(const struct rb_node *); | ||
| 68 | extern struct rb_node *rb_first(const struct rb_root *); | ||
| 69 | extern struct rb_node *rb_last(const struct rb_root *); | ||
| 70 | |||
| 71 | /* Postorder iteration - always visit the parent after its children */ | ||
| 72 | extern struct rb_node *rb_first_postorder(const struct rb_root *); | ||
| 73 | extern struct rb_node *rb_next_postorder(const struct rb_node *); | ||
| 74 | |||
| 75 | /* Fast replacement of a single node without remove/rebalance/add/rebalance */ | ||
| 76 | extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, | ||
| 77 | struct rb_root *root); | ||
| 78 | |||
| 79 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, | ||
| 80 | struct rb_node **rb_link) | ||
| 81 | { | ||
| 82 | node->__rb_parent_color = (unsigned long)parent; | ||
| 83 | node->rb_left = node->rb_right = NULL; | ||
| 84 | |||
| 85 | *rb_link = node; | ||
| 86 | } | ||
| 87 | |||
| 88 | #define rb_entry_safe(ptr, type, member) \ | ||
| 89 | ({ typeof(ptr) ____ptr = (ptr); \ | ||
| 90 | ____ptr ? rb_entry(____ptr, type, member) : NULL; \ | ||
| 91 | }) | ||
| 92 | |||
| 93 | |||
| 94 | /* | ||
| 95 | * Handy for checking that we are not deleting an entry that is | ||
| 96 | * already in a list, found in block/{blk-throttle,cfq-iosched}.c, | ||
| 97 | * probably should be moved to lib/rbtree.c... | ||
| 98 | */ | ||
| 99 | static inline void rb_erase_init(struct rb_node *n, struct rb_root *root) | ||
| 100 | { | ||
| 101 | rb_erase(n, root); | ||
| 102 | RB_CLEAR_NODE(n); | ||
| 103 | } | ||
| 104 | #endif /* __TOOLS_LINUX_PERF_RBTREE_H */ | ||
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h new file mode 100644 index 000000000000..43be941db695 --- /dev/null +++ b/tools/include/linux/rbtree_augmented.h | |||
| @@ -0,0 +1,245 @@ | |||
| 1 | /* | ||
| 2 | Red Black Trees | ||
| 3 | (C) 1999 Andrea Arcangeli <andrea@suse.de> | ||
| 4 | (C) 2002 David Woodhouse <dwmw2@infradead.org> | ||
| 5 | (C) 2012 Michel Lespinasse <walken@google.com> | ||
| 6 | |||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of the GNU General Public License as published by | ||
| 9 | the Free Software Foundation; either version 2 of the License, or | ||
| 10 | (at your option) any later version. | ||
| 11 | |||
| 12 | This program is distributed in the hope that it will be useful, | ||
| 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | GNU General Public License for more details. | ||
| 16 | |||
| 17 | You should have received a copy of the GNU General Public License | ||
| 18 | along with this program; if not, write to the Free Software | ||
| 19 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 20 | |||
| 21 | tools/linux/include/linux/rbtree_augmented.h | ||
| 22 | |||
| 23 | Copied from: | ||
| 24 | linux/include/linux/rbtree_augmented.h | ||
| 25 | */ | ||
| 26 | |||
| 27 | #ifndef _TOOLS_LINUX_RBTREE_AUGMENTED_H | ||
| 28 | #define _TOOLS_LINUX_RBTREE_AUGMENTED_H | ||
| 29 | |||
| 30 | #include <linux/compiler.h> | ||
| 31 | #include <linux/rbtree.h> | ||
| 32 | |||
| 33 | /* | ||
| 34 | * Please note - only struct rb_augment_callbacks and the prototypes for | ||
| 35 | * rb_insert_augmented() and rb_erase_augmented() are intended to be public. | ||
| 36 | * The rest are implementation details you are not expected to depend on. | ||
| 37 | * | ||
| 38 | * See Documentation/rbtree.txt for documentation and samples. | ||
| 39 | */ | ||
| 40 | |||
| 41 | struct rb_augment_callbacks { | ||
| 42 | void (*propagate)(struct rb_node *node, struct rb_node *stop); | ||
| 43 | void (*copy)(struct rb_node *old, struct rb_node *new); | ||
| 44 | void (*rotate)(struct rb_node *old, struct rb_node *new); | ||
| 45 | }; | ||
| 46 | |||
| 47 | extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, | ||
| 48 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); | ||
| 49 | /* | ||
| 50 | * Fixup the rbtree and update the augmented information when rebalancing. | ||
| 51 | * | ||
| 52 | * On insertion, the user must update the augmented information on the path | ||
| 53 | * leading to the inserted node, then call rb_link_node() as usual and | ||
| 54 | * rb_augment_inserted() instead of the usual rb_insert_color() call. | ||
| 55 | * If rb_augment_inserted() rebalances the rbtree, it will callback into | ||
| 56 | * a user provided function to update the augmented information on the | ||
| 57 | * affected subtrees. | ||
| 58 | */ | ||
| 59 | static inline void | ||
| 60 | rb_insert_augmented(struct rb_node *node, struct rb_root *root, | ||
| 61 | const struct rb_augment_callbacks *augment) | ||
| 62 | { | ||
| 63 | __rb_insert_augmented(node, root, augment->rotate); | ||
| 64 | } | ||
| 65 | |||
| 66 | #define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ | ||
| 67 | rbtype, rbaugmented, rbcompute) \ | ||
| 68 | static inline void \ | ||
| 69 | rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ | ||
| 70 | { \ | ||
| 71 | while (rb != stop) { \ | ||
| 72 | rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ | ||
| 73 | rbtype augmented = rbcompute(node); \ | ||
| 74 | if (node->rbaugmented == augmented) \ | ||
| 75 | break; \ | ||
| 76 | node->rbaugmented = augmented; \ | ||
| 77 | rb = rb_parent(&node->rbfield); \ | ||
| 78 | } \ | ||
| 79 | } \ | ||
| 80 | static inline void \ | ||
| 81 | rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ | ||
| 82 | { \ | ||
| 83 | rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ | ||
| 84 | rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ | ||
| 85 | new->rbaugmented = old->rbaugmented; \ | ||
| 86 | } \ | ||
| 87 | static void \ | ||
| 88 | rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ | ||
| 89 | { \ | ||
| 90 | rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ | ||
| 91 | rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ | ||
| 92 | new->rbaugmented = old->rbaugmented; \ | ||
| 93 | old->rbaugmented = rbcompute(old); \ | ||
| 94 | } \ | ||
| 95 | rbstatic const struct rb_augment_callbacks rbname = { \ | ||
| 96 | rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ | ||
| 97 | }; | ||
| 98 | |||
| 99 | |||
| 100 | #define RB_RED 0 | ||
| 101 | #define RB_BLACK 1 | ||
| 102 | |||
| 103 | #define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) | ||
| 104 | |||
| 105 | #define __rb_color(pc) ((pc) & 1) | ||
| 106 | #define __rb_is_black(pc) __rb_color(pc) | ||
| 107 | #define __rb_is_red(pc) (!__rb_color(pc)) | ||
| 108 | #define rb_color(rb) __rb_color((rb)->__rb_parent_color) | ||
| 109 | #define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) | ||
| 110 | #define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) | ||
| 111 | |||
| 112 | static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) | ||
| 113 | { | ||
| 114 | rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; | ||
| 115 | } | ||
| 116 | |||
| 117 | static inline void rb_set_parent_color(struct rb_node *rb, | ||
| 118 | struct rb_node *p, int color) | ||
| 119 | { | ||
| 120 | rb->__rb_parent_color = (unsigned long)p | color; | ||
| 121 | } | ||
| 122 | |||
| 123 | static inline void | ||
| 124 | __rb_change_child(struct rb_node *old, struct rb_node *new, | ||
| 125 | struct rb_node *parent, struct rb_root *root) | ||
| 126 | { | ||
| 127 | if (parent) { | ||
| 128 | if (parent->rb_left == old) | ||
| 129 | parent->rb_left = new; | ||
| 130 | else | ||
| 131 | parent->rb_right = new; | ||
| 132 | } else | ||
| 133 | root->rb_node = new; | ||
| 134 | } | ||
| 135 | |||
| 136 | extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, | ||
| 137 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); | ||
| 138 | |||
| 139 | static __always_inline struct rb_node * | ||
| 140 | __rb_erase_augmented(struct rb_node *node, struct rb_root *root, | ||
| 141 | const struct rb_augment_callbacks *augment) | ||
| 142 | { | ||
| 143 | struct rb_node *child = node->rb_right, *tmp = node->rb_left; | ||
| 144 | struct rb_node *parent, *rebalance; | ||
| 145 | unsigned long pc; | ||
| 146 | |||
| 147 | if (!tmp) { | ||
| 148 | /* | ||
| 149 | * Case 1: node to erase has no more than 1 child (easy!) | ||
| 150 | * | ||
| 151 | * Note that if there is one child it must be red due to 5) | ||
| 152 | * and node must be black due to 4). We adjust colors locally | ||
| 153 | * so as to bypass __rb_erase_color() later on. | ||
| 154 | */ | ||
| 155 | pc = node->__rb_parent_color; | ||
| 156 | parent = __rb_parent(pc); | ||
| 157 | __rb_change_child(node, child, parent, root); | ||
| 158 | if (child) { | ||
| 159 | child->__rb_parent_color = pc; | ||
| 160 | rebalance = NULL; | ||
| 161 | } else | ||
| 162 | rebalance = __rb_is_black(pc) ? parent : NULL; | ||
| 163 | tmp = parent; | ||
| 164 | } else if (!child) { | ||
| 165 | /* Still case 1, but this time the child is node->rb_left */ | ||
| 166 | tmp->__rb_parent_color = pc = node->__rb_parent_color; | ||
| 167 | parent = __rb_parent(pc); | ||
| 168 | __rb_change_child(node, tmp, parent, root); | ||
| 169 | rebalance = NULL; | ||
| 170 | tmp = parent; | ||
| 171 | } else { | ||
| 172 | struct rb_node *successor = child, *child2; | ||
| 173 | tmp = child->rb_left; | ||
| 174 | if (!tmp) { | ||
| 175 | /* | ||
| 176 | * Case 2: node's successor is its right child | ||
| 177 | * | ||
| 178 | * (n) (s) | ||
| 179 | * / \ / \ | ||
| 180 | * (x) (s) -> (x) (c) | ||
| 181 | * \ | ||
| 182 | * (c) | ||
| 183 | */ | ||
| 184 | parent = successor; | ||
| 185 | child2 = successor->rb_right; | ||
| 186 | augment->copy(node, successor); | ||
| 187 | } else { | ||
| 188 | /* | ||
| 189 | * Case 3: node's successor is leftmost under | ||
| 190 | * node's right child subtree | ||
| 191 | * | ||
| 192 | * (n) (s) | ||
| 193 | * / \ / \ | ||
| 194 | * (x) (y) -> (x) (y) | ||
| 195 | * / / | ||
| 196 | * (p) (p) | ||
| 197 | * / / | ||
| 198 | * (s) (c) | ||
| 199 | * \ | ||
| 200 | * (c) | ||
| 201 | */ | ||
| 202 | do { | ||
| 203 | parent = successor; | ||
| 204 | successor = tmp; | ||
| 205 | tmp = tmp->rb_left; | ||
| 206 | } while (tmp); | ||
| 207 | parent->rb_left = child2 = successor->rb_right; | ||
| 208 | successor->rb_right = child; | ||
| 209 | rb_set_parent(child, successor); | ||
| 210 | augment->copy(node, successor); | ||
| 211 | augment->propagate(parent, successor); | ||
| 212 | } | ||
| 213 | |||
| 214 | successor->rb_left = tmp = node->rb_left; | ||
| 215 | rb_set_parent(tmp, successor); | ||
| 216 | |||
| 217 | pc = node->__rb_parent_color; | ||
| 218 | tmp = __rb_parent(pc); | ||
| 219 | __rb_change_child(node, successor, tmp, root); | ||
| 220 | if (child2) { | ||
| 221 | successor->__rb_parent_color = pc; | ||
| 222 | rb_set_parent_color(child2, parent, RB_BLACK); | ||
| 223 | rebalance = NULL; | ||
| 224 | } else { | ||
| 225 | unsigned long pc2 = successor->__rb_parent_color; | ||
| 226 | successor->__rb_parent_color = pc; | ||
| 227 | rebalance = __rb_is_black(pc2) ? parent : NULL; | ||
| 228 | } | ||
| 229 | tmp = successor; | ||
| 230 | } | ||
| 231 | |||
| 232 | augment->propagate(tmp, NULL); | ||
| 233 | return rebalance; | ||
| 234 | } | ||
| 235 | |||
| 236 | static __always_inline void | ||
| 237 | rb_erase_augmented(struct rb_node *node, struct rb_root *root, | ||
| 238 | const struct rb_augment_callbacks *augment) | ||
| 239 | { | ||
| 240 | struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); | ||
| 241 | if (rebalance) | ||
| 242 | __rb_erase_color(rebalance, root, augment->rotate); | ||
| 243 | } | ||
| 244 | |||
| 245 | #endif /* _TOOLS_LINUX_RBTREE_AUGMENTED_H */ | ||
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c new file mode 100644 index 000000000000..17c2b596f043 --- /dev/null +++ b/tools/lib/rbtree.c | |||
| @@ -0,0 +1,548 @@ | |||
| 1 | /* | ||
| 2 | Red Black Trees | ||
| 3 | (C) 1999 Andrea Arcangeli <andrea@suse.de> | ||
| 4 | (C) 2002 David Woodhouse <dwmw2@infradead.org> | ||
| 5 | (C) 2012 Michel Lespinasse <walken@google.com> | ||
| 6 | |||
| 7 | This program is free software; you can redistribute it and/or modify | ||
| 8 | it under the terms of the GNU General Public License as published by | ||
| 9 | the Free Software Foundation; either version 2 of the License, or | ||
| 10 | (at your option) any later version. | ||
| 11 | |||
| 12 | This program is distributed in the hope that it will be useful, | ||
| 13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | GNU General Public License for more details. | ||
| 16 | |||
| 17 | You should have received a copy of the GNU General Public License | ||
| 18 | along with this program; if not, write to the Free Software | ||
| 19 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 20 | |||
| 21 | linux/lib/rbtree.c | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/rbtree_augmented.h> | ||
| 25 | |||
| 26 | /* | ||
| 27 | * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree | ||
| 28 | * | ||
| 29 | * 1) A node is either red or black | ||
| 30 | * 2) The root is black | ||
| 31 | * 3) All leaves (NULL) are black | ||
| 32 | * 4) Both children of every red node are black | ||
| 33 | * 5) Every simple path from root to leaves contains the same number | ||
| 34 | * of black nodes. | ||
| 35 | * | ||
| 36 | * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two | ||
| 37 | * consecutive red nodes in a path and every red node is therefore followed by | ||
| 38 | * a black. So if B is the number of black nodes on every simple path (as per | ||
| 39 | * 5), then the longest possible path due to 4 is 2B. | ||
| 40 | * | ||
| 41 | * We shall indicate color with case, where black nodes are uppercase and red | ||
| 42 | * nodes will be lowercase. Unknown color nodes shall be drawn as red within | ||
| 43 | * parentheses and have some accompanying text comment. | ||
| 44 | */ | ||
| 45 | |||
| 46 | static inline void rb_set_black(struct rb_node *rb) | ||
| 47 | { | ||
| 48 | rb->__rb_parent_color |= RB_BLACK; | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline struct rb_node *rb_red_parent(struct rb_node *red) | ||
| 52 | { | ||
| 53 | return (struct rb_node *)red->__rb_parent_color; | ||
| 54 | } | ||
| 55 | |||
| 56 | /* | ||
| 57 | * Helper function for rotations: | ||
| 58 | * - old's parent and color get assigned to new | ||
| 59 | * - old gets assigned new as a parent and 'color' as a color. | ||
| 60 | */ | ||
| 61 | static inline void | ||
| 62 | __rb_rotate_set_parents(struct rb_node *old, struct rb_node *new, | ||
| 63 | struct rb_root *root, int color) | ||
| 64 | { | ||
| 65 | struct rb_node *parent = rb_parent(old); | ||
| 66 | new->__rb_parent_color = old->__rb_parent_color; | ||
| 67 | rb_set_parent_color(old, new, color); | ||
| 68 | __rb_change_child(old, new, parent, root); | ||
| 69 | } | ||
| 70 | |||
| 71 | static __always_inline void | ||
| 72 | __rb_insert(struct rb_node *node, struct rb_root *root, | ||
| 73 | void (*augment_rotate)(struct rb_node *old, struct rb_node *new)) | ||
| 74 | { | ||
| 75 | struct rb_node *parent = rb_red_parent(node), *gparent, *tmp; | ||
| 76 | |||
| 77 | while (true) { | ||
| 78 | /* | ||
| 79 | * Loop invariant: node is red | ||
| 80 | * | ||
| 81 | * If there is a black parent, we are done. | ||
| 82 | * Otherwise, take some corrective action as we don't | ||
| 83 | * want a red root or two consecutive red nodes. | ||
| 84 | */ | ||
| 85 | if (!parent) { | ||
| 86 | rb_set_parent_color(node, NULL, RB_BLACK); | ||
| 87 | break; | ||
| 88 | } else if (rb_is_black(parent)) | ||
| 89 | break; | ||
| 90 | |||
| 91 | gparent = rb_red_parent(parent); | ||
| 92 | |||
| 93 | tmp = gparent->rb_right; | ||
| 94 | if (parent != tmp) { /* parent == gparent->rb_left */ | ||
| 95 | if (tmp && rb_is_red(tmp)) { | ||
| 96 | /* | ||
| 97 | * Case 1 - color flips | ||
| 98 | * | ||
| 99 | * G g | ||
| 100 | * / \ / \ | ||
| 101 | * p u --> P U | ||
| 102 | * / / | ||
| 103 | * n n | ||
| 104 | * | ||
| 105 | * However, since g's parent might be red, and | ||
| 106 | * 4) does not allow this, we need to recurse | ||
| 107 | * at g. | ||
| 108 | */ | ||
| 109 | rb_set_parent_color(tmp, gparent, RB_BLACK); | ||
| 110 | rb_set_parent_color(parent, gparent, RB_BLACK); | ||
| 111 | node = gparent; | ||
| 112 | parent = rb_parent(node); | ||
| 113 | rb_set_parent_color(node, parent, RB_RED); | ||
| 114 | continue; | ||
| 115 | } | ||
| 116 | |||
| 117 | tmp = parent->rb_right; | ||
| 118 | if (node == tmp) { | ||
| 119 | /* | ||
| 120 | * Case 2 - left rotate at parent | ||
| 121 | * | ||
| 122 | * G G | ||
| 123 | * / \ / \ | ||
| 124 | * p U --> n U | ||
| 125 | * \ / | ||
| 126 | * n p | ||
| 127 | * | ||
| 128 | * This still leaves us in violation of 4), the | ||
| 129 | * continuation into Case 3 will fix that. | ||
| 130 | */ | ||
| 131 | parent->rb_right = tmp = node->rb_left; | ||
| 132 | node->rb_left = parent; | ||
| 133 | if (tmp) | ||
| 134 | rb_set_parent_color(tmp, parent, | ||
| 135 | RB_BLACK); | ||
| 136 | rb_set_parent_color(parent, node, RB_RED); | ||
| 137 | augment_rotate(parent, node); | ||
| 138 | parent = node; | ||
| 139 | tmp = node->rb_right; | ||
| 140 | } | ||
| 141 | |||
| 142 | /* | ||
| 143 | * Case 3 - right rotate at gparent | ||
| 144 | * | ||
| 145 | * G P | ||
| 146 | * / \ / \ | ||
| 147 | * p U --> n g | ||
| 148 | * / \ | ||
| 149 | * n U | ||
| 150 | */ | ||
| 151 | gparent->rb_left = tmp; /* == parent->rb_right */ | ||
| 152 | parent->rb_right = gparent; | ||
| 153 | if (tmp) | ||
| 154 | rb_set_parent_color(tmp, gparent, RB_BLACK); | ||
| 155 | __rb_rotate_set_parents(gparent, parent, root, RB_RED); | ||
| 156 | augment_rotate(gparent, parent); | ||
| 157 | break; | ||
| 158 | } else { | ||
| 159 | tmp = gparent->rb_left; | ||
| 160 | if (tmp && rb_is_red(tmp)) { | ||
| 161 | /* Case 1 - color flips */ | ||
| 162 | rb_set_parent_color(tmp, gparent, RB_BLACK); | ||
| 163 | rb_set_parent_color(parent, gparent, RB_BLACK); | ||
| 164 | node = gparent; | ||
| 165 | parent = rb_parent(node); | ||
| 166 | rb_set_parent_color(node, parent, RB_RED); | ||
| 167 | continue; | ||
| 168 | } | ||
| 169 | |||
| 170 | tmp = parent->rb_left; | ||
| 171 | if (node == tmp) { | ||
| 172 | /* Case 2 - right rotate at parent */ | ||
| 173 | parent->rb_left = tmp = node->rb_right; | ||
| 174 | node->rb_right = parent; | ||
| 175 | if (tmp) | ||
| 176 | rb_set_parent_color(tmp, parent, | ||
| 177 | RB_BLACK); | ||
| 178 | rb_set_parent_color(parent, node, RB_RED); | ||
| 179 | augment_rotate(parent, node); | ||
| 180 | parent = node; | ||
| 181 | tmp = node->rb_left; | ||
| 182 | } | ||
| 183 | |||
| 184 | /* Case 3 - left rotate at gparent */ | ||
| 185 | gparent->rb_right = tmp; /* == parent->rb_left */ | ||
| 186 | parent->rb_left = gparent; | ||
| 187 | if (tmp) | ||
| 188 | rb_set_parent_color(tmp, gparent, RB_BLACK); | ||
| 189 | __rb_rotate_set_parents(gparent, parent, root, RB_RED); | ||
| 190 | augment_rotate(gparent, parent); | ||
| 191 | break; | ||
| 192 | } | ||
| 193 | } | ||
| 194 | } | ||
| 195 | |||
/*
 * Inline version for rb_erase() use - we want to be able to inline
 * and eliminate the dummy_rotate callback there
 */
static __always_inline void
____rb_erase_color(struct rb_node *parent, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
	struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;

	while (true) {
		/*
		 * Loop invariants:
		 * - node is black (or NULL on first iteration)
		 * - node is not the root (parent is not NULL)
		 * - All leaf paths going through parent and node have a
		 *   black node count that is 1 lower than other leaf paths.
		 */
		sibling = parent->rb_right;
		if (node != sibling) {	/* node == parent->rb_left */
			if (rb_is_red(sibling)) {
				/*
				 * Case 1 - left rotate at parent
				 *
				 *     P               S
				 *    / \             / \
				 *   N   s    -->    p   Sr
				 *      / \         / \
				 *     Sl  Sr      N   Sl
				 */
				parent->rb_right = tmp1 = sibling->rb_left;
				sibling->rb_left = parent;
				/*
				 * Sl (tmp1) is non-NULL and black here: a red
				 * sibling must have two black children.
				 */
				rb_set_parent_color(tmp1, parent, RB_BLACK);
				__rb_rotate_set_parents(parent, sibling, root,
							RB_RED);
				augment_rotate(parent, sibling);
				sibling = tmp1;
			}
			tmp1 = sibling->rb_right;
			if (!tmp1 || rb_is_black(tmp1)) {
				tmp2 = sibling->rb_left;
				if (!tmp2 || rb_is_black(tmp2)) {
					/*
					 * Case 2 - sibling color flip
					 * (p could be either color here)
					 *
					 *    (p)           (p)
					 *    / \           / \
					 *   N   S    -->  N   s
					 *      / \           / \
					 *     Sl  Sr        Sl  Sr
					 *
					 * This leaves us violating 5) which
					 * can be fixed by flipping p to black
					 * if it was red, or by recursing at p.
					 * p is red when coming from Case 1.
					 */
					rb_set_parent_color(sibling, parent,
							    RB_RED);
					if (rb_is_red(parent))
						rb_set_black(parent);
					else {
						node = parent;
						parent = rb_parent(node);
						if (parent)
							continue;
					}
					break;
				}
				/*
				 * Case 3 - right rotate at sibling
				 * (p could be either color here)
				 *
				 *   (p)           (p)
				 *   / \           / \
				 *  N   S    -->  N   Sl
				 *     / \             \
				 *    sl  Sr            s
				 *                       \
				 *                        Sr
				 */
				sibling->rb_left = tmp1 = tmp2->rb_right;
				tmp2->rb_right = sibling;
				parent->rb_right = tmp2;
				if (tmp1)
					rb_set_parent_color(tmp1, sibling,
							    RB_BLACK);
				augment_rotate(sibling, tmp2);
				tmp1 = sibling;
				sibling = tmp2;
			}
			/*
			 * Case 4 - left rotate at parent + color flips
			 * (p and sl could be either color here.
			 *  After rotation, p becomes black, s acquires
			 *  p's color, and sl keeps its color)
			 *
			 *      (p)             (s)
			 *      / \             / \
			 *     N   S     -->   P   Sr
			 *        / \         / \
			 *      (sl) sr      N  (sl)
			 */
			parent->rb_right = tmp2 = sibling->rb_left;
			sibling->rb_left = parent;
			/* tmp1 == sr, known red here, repainted black. */
			rb_set_parent_color(tmp1, sibling, RB_BLACK);
			if (tmp2)
				rb_set_parent(tmp2, parent);
			__rb_rotate_set_parents(parent, sibling, root,
						RB_BLACK);
			augment_rotate(parent, sibling);
			break;
		} else {
			/* Mirror image of the above: node == parent->rb_right */
			sibling = parent->rb_left;
			if (rb_is_red(sibling)) {
				/* Case 1 - right rotate at parent */
				parent->rb_left = tmp1 = sibling->rb_right;
				sibling->rb_right = parent;
				rb_set_parent_color(tmp1, parent, RB_BLACK);
				__rb_rotate_set_parents(parent, sibling, root,
							RB_RED);
				augment_rotate(parent, sibling);
				sibling = tmp1;
			}
			tmp1 = sibling->rb_left;
			if (!tmp1 || rb_is_black(tmp1)) {
				tmp2 = sibling->rb_right;
				if (!tmp2 || rb_is_black(tmp2)) {
					/* Case 2 - sibling color flip */
					rb_set_parent_color(sibling, parent,
							    RB_RED);
					if (rb_is_red(parent))
						rb_set_black(parent);
					else {
						node = parent;
						parent = rb_parent(node);
						if (parent)
							continue;
					}
					break;
				}
				/* Case 3 - left rotate at sibling */
				sibling->rb_right = tmp1 = tmp2->rb_left;
				tmp2->rb_left = sibling;
				parent->rb_left = tmp2;
				if (tmp1)
					rb_set_parent_color(tmp1, sibling,
							    RB_BLACK);
				augment_rotate(sibling, tmp2);
				tmp1 = sibling;
				sibling = tmp2;
			}
			/* Case 4 - right rotate at parent + color flips */
			parent->rb_left = tmp2 = sibling->rb_right;
			sibling->rb_right = parent;
			rb_set_parent_color(tmp1, sibling, RB_BLACK);
			if (tmp2)
				rb_set_parent(tmp2, parent);
			__rb_rotate_set_parents(parent, sibling, root,
						RB_BLACK);
			augment_rotate(parent, sibling);
			break;
		}
	}
}
| 361 | |||
/*
 * Non-inline version for rb_erase_augmented() use.
 *
 * Out-of-line entry point so augmented users can share one copy of the
 * rebalancing code while passing their own rotation callback.
 */
void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
	____rb_erase_color(parent, root, augment_rotate);
}
| 368 | |||
/*
 * Non-augmented rbtree manipulation functions.
 *
 * We use dummy augmented callbacks here, and have the compiler optimize them
 * out of the rb_insert_color() and rb_erase() function definitions.
 */

/* Empty-bodied callbacks; the inliner eliminates the calls entirely. */
static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}

/* Callback table handed to __rb_erase_augmented() by plain rb_erase(). */
static const struct rb_augment_callbacks dummy_callbacks = {
	dummy_propagate, dummy_copy, dummy_rotate
};
| 383 | |||
/*
 * Rebalance/recolor after inserting @node, which the caller has already
 * linked into place with rb_link_node().
 */
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
	__rb_insert(node, root, dummy_rotate);
}
| 388 | |||
| 389 | void rb_erase(struct rb_node *node, struct rb_root *root) | ||
| 390 | { | ||
| 391 | struct rb_node *rebalance; | ||
| 392 | rebalance = __rb_erase_augmented(node, root, &dummy_callbacks); | ||
| 393 | if (rebalance) | ||
| 394 | ____rb_erase_color(rebalance, root, dummy_rotate); | ||
| 395 | } | ||
| 396 | |||
/*
 * Augmented rbtree manipulation functions.
 *
 * This instantiates the same __always_inline functions as in the non-augmented
 * case, but this time with user-defined callbacks.
 */

/* Insertion rebalance that invokes @augment_rotate on every rotation. */
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
	void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
{
	__rb_insert(node, root, augment_rotate);
}
| 409 | |||
| 410 | /* | ||
| 411 | * This function returns the first node (in sort order) of the tree. | ||
| 412 | */ | ||
| 413 | struct rb_node *rb_first(const struct rb_root *root) | ||
| 414 | { | ||
| 415 | struct rb_node *n; | ||
| 416 | |||
| 417 | n = root->rb_node; | ||
| 418 | if (!n) | ||
| 419 | return NULL; | ||
| 420 | while (n->rb_left) | ||
| 421 | n = n->rb_left; | ||
| 422 | return n; | ||
| 423 | } | ||
| 424 | |||
| 425 | struct rb_node *rb_last(const struct rb_root *root) | ||
| 426 | { | ||
| 427 | struct rb_node *n; | ||
| 428 | |||
| 429 | n = root->rb_node; | ||
| 430 | if (!n) | ||
| 431 | return NULL; | ||
| 432 | while (n->rb_right) | ||
| 433 | n = n->rb_right; | ||
| 434 | return n; | ||
| 435 | } | ||
| 436 | |||
| 437 | struct rb_node *rb_next(const struct rb_node *node) | ||
| 438 | { | ||
| 439 | struct rb_node *parent; | ||
| 440 | |||
| 441 | if (RB_EMPTY_NODE(node)) | ||
| 442 | return NULL; | ||
| 443 | |||
| 444 | /* | ||
| 445 | * If we have a right-hand child, go down and then left as far | ||
| 446 | * as we can. | ||
| 447 | */ | ||
| 448 | if (node->rb_right) { | ||
| 449 | node = node->rb_right; | ||
| 450 | while (node->rb_left) | ||
| 451 | node=node->rb_left; | ||
| 452 | return (struct rb_node *)node; | ||
| 453 | } | ||
| 454 | |||
| 455 | /* | ||
| 456 | * No right-hand children. Everything down and left is smaller than us, | ||
| 457 | * so any 'next' node must be in the general direction of our parent. | ||
| 458 | * Go up the tree; any time the ancestor is a right-hand child of its | ||
| 459 | * parent, keep going up. First time it's a left-hand child of its | ||
| 460 | * parent, said parent is our 'next' node. | ||
| 461 | */ | ||
| 462 | while ((parent = rb_parent(node)) && node == parent->rb_right) | ||
| 463 | node = parent; | ||
| 464 | |||
| 465 | return parent; | ||
| 466 | } | ||
| 467 | |||
| 468 | struct rb_node *rb_prev(const struct rb_node *node) | ||
| 469 | { | ||
| 470 | struct rb_node *parent; | ||
| 471 | |||
| 472 | if (RB_EMPTY_NODE(node)) | ||
| 473 | return NULL; | ||
| 474 | |||
| 475 | /* | ||
| 476 | * If we have a left-hand child, go down and then right as far | ||
| 477 | * as we can. | ||
| 478 | */ | ||
| 479 | if (node->rb_left) { | ||
| 480 | node = node->rb_left; | ||
| 481 | while (node->rb_right) | ||
| 482 | node=node->rb_right; | ||
| 483 | return (struct rb_node *)node; | ||
| 484 | } | ||
| 485 | |||
| 486 | /* | ||
| 487 | * No left-hand children. Go up till we find an ancestor which | ||
| 488 | * is a right-hand child of its parent. | ||
| 489 | */ | ||
| 490 | while ((parent = rb_parent(node)) && node == parent->rb_left) | ||
| 491 | node = parent; | ||
| 492 | |||
| 493 | return parent; | ||
| 494 | } | ||
| 495 | |||
| 496 | void rb_replace_node(struct rb_node *victim, struct rb_node *new, | ||
| 497 | struct rb_root *root) | ||
| 498 | { | ||
| 499 | struct rb_node *parent = rb_parent(victim); | ||
| 500 | |||
| 501 | /* Set the surrounding nodes to point to the replacement */ | ||
| 502 | __rb_change_child(victim, new, parent, root); | ||
| 503 | if (victim->rb_left) | ||
| 504 | rb_set_parent(victim->rb_left, new); | ||
| 505 | if (victim->rb_right) | ||
| 506 | rb_set_parent(victim->rb_right, new); | ||
| 507 | |||
| 508 | /* Copy the pointers/colour from the victim to the replacement */ | ||
| 509 | *new = *victim; | ||
| 510 | } | ||
| 511 | |||
| 512 | static struct rb_node *rb_left_deepest_node(const struct rb_node *node) | ||
| 513 | { | ||
| 514 | for (;;) { | ||
| 515 | if (node->rb_left) | ||
| 516 | node = node->rb_left; | ||
| 517 | else if (node->rb_right) | ||
| 518 | node = node->rb_right; | ||
| 519 | else | ||
| 520 | return (struct rb_node *)node; | ||
| 521 | } | ||
| 522 | } | ||
| 523 | |||
| 524 | struct rb_node *rb_next_postorder(const struct rb_node *node) | ||
| 525 | { | ||
| 526 | const struct rb_node *parent; | ||
| 527 | if (!node) | ||
| 528 | return NULL; | ||
| 529 | parent = rb_parent(node); | ||
| 530 | |||
| 531 | /* If we're sitting on node, we've already seen our children */ | ||
| 532 | if (parent && node == parent->rb_left && parent->rb_right) { | ||
| 533 | /* If we are the parent's left node, go to the parent's right | ||
| 534 | * node then all the way down to the left */ | ||
| 535 | return rb_left_deepest_node(parent->rb_right); | ||
| 536 | } else | ||
| 537 | /* Otherwise we are the parent's right node, and the parent | ||
| 538 | * should be next */ | ||
| 539 | return (struct rb_node *)parent; | ||
| 540 | } | ||
| 541 | |||
| 542 | struct rb_node *rb_first_postorder(const struct rb_root *root) | ||
| 543 | { | ||
| 544 | if (!root->rb_node) | ||
| 545 | return NULL; | ||
| 546 | |||
| 547 | return rb_left_deepest_node(root->rb_node); | ||
| 548 | } | ||
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index fe50a1b34aa0..09dc0aabb515 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST | |||
| @@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h | |||
| 18 | tools/arch/x86/include/asm/rmwcc.h | 18 | tools/arch/x86/include/asm/rmwcc.h |
| 19 | tools/lib/traceevent | 19 | tools/lib/traceevent |
| 20 | tools/lib/api | 20 | tools/lib/api |
| 21 | tools/lib/rbtree.c | ||
| 21 | tools/lib/symbol/kallsyms.c | 22 | tools/lib/symbol/kallsyms.c |
| 22 | tools/lib/symbol/kallsyms.h | 23 | tools/lib/symbol/kallsyms.h |
| 23 | tools/lib/util/find_next_bit.c | 24 | tools/lib/util/find_next_bit.c |
| @@ -44,6 +45,8 @@ tools/include/linux/kernel.h | |||
| 44 | tools/include/linux/list.h | 45 | tools/include/linux/list.h |
| 45 | tools/include/linux/log2.h | 46 | tools/include/linux/log2.h |
| 46 | tools/include/linux/poison.h | 47 | tools/include/linux/poison.h |
| 48 | tools/include/linux/rbtree.h | ||
| 49 | tools/include/linux/rbtree_augmented.h | ||
| 47 | tools/include/linux/types.h | 50 | tools/include/linux/types.h |
| 48 | include/asm-generic/bitops/arch_hweight.h | 51 | include/asm-generic/bitops/arch_hweight.h |
| 49 | include/asm-generic/bitops/const_hweight.h | 52 | include/asm-generic/bitops/const_hweight.h |
| @@ -51,12 +54,10 @@ include/asm-generic/bitops/fls64.h | |||
| 51 | include/asm-generic/bitops/__fls.h | 54 | include/asm-generic/bitops/__fls.h |
| 52 | include/asm-generic/bitops/fls.h | 55 | include/asm-generic/bitops/fls.h |
| 53 | include/linux/perf_event.h | 56 | include/linux/perf_event.h |
| 54 | include/linux/rbtree.h | ||
| 55 | include/linux/list.h | 57 | include/linux/list.h |
| 56 | include/linux/hash.h | 58 | include/linux/hash.h |
| 57 | include/linux/stringify.h | 59 | include/linux/stringify.h |
| 58 | lib/hweight.c | 60 | lib/hweight.c |
| 59 | lib/rbtree.c | ||
| 60 | include/linux/swab.h | 61 | include/linux/swab.h |
| 61 | arch/*/include/asm/unistd*.h | 62 | arch/*/include/asm/unistd*.h |
| 62 | arch/*/include/uapi/asm/unistd*.h | 63 | arch/*/include/uapi/asm/unistd*.h |
| @@ -65,7 +66,6 @@ arch/*/lib/memcpy*.S | |||
| 65 | arch/*/lib/memset*.S | 66 | arch/*/lib/memset*.S |
| 66 | include/linux/poison.h | 67 | include/linux/poison.h |
| 67 | include/linux/hw_breakpoint.h | 68 | include/linux/hw_breakpoint.h |
| 68 | include/linux/rbtree_augmented.h | ||
| 69 | include/uapi/linux/perf_event.h | 69 | include/uapi/linux/perf_event.h |
| 70 | include/uapi/linux/const.h | 70 | include/uapi/linux/const.h |
| 71 | include/uapi/linux/swab.h | 71 | include/uapi/linux/swab.h |
diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 586a59d46022..601d11440596 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build | |||
| @@ -139,7 +139,7 @@ $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c FORCE | |||
| 139 | $(call rule_mkdir) | 139 | $(call rule_mkdir) |
| 140 | $(call if_changed_dep,cc_o_c) | 140 | $(call if_changed_dep,cc_o_c) |
| 141 | 141 | ||
| 142 | $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c FORCE | 142 | $(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE |
| 143 | $(call rule_mkdir) | 143 | $(call rule_mkdir) |
| 144 | $(call if_changed_dep,cc_o_c) | 144 | $(call if_changed_dep,cc_o_c) |
| 145 | 145 | ||
diff --git a/tools/perf/util/include/linux/rbtree.h b/tools/perf/util/include/linux/rbtree.h deleted file mode 100644 index f06d89f0b867..000000000000 --- a/tools/perf/util/include/linux/rbtree.h +++ /dev/null | |||
| @@ -1,16 +0,0 @@ | |||
| 1 | #ifndef __TOOLS_LINUX_PERF_RBTREE_H | ||
| 2 | #define __TOOLS_LINUX_PERF_RBTREE_H | ||
| 3 | #include <stdbool.h> | ||
| 4 | #include "../../../../include/linux/rbtree.h" | ||
| 5 | |||
| 6 | /* | ||
| 7 | * Handy for checking that we are not deleting an entry that is | ||
| 8 | * already in a list, found in block/{blk-throttle,cfq-iosched}.c, | ||
| 9 | * probably should be moved to lib/rbtree.c... | ||
| 10 | */ | ||
| 11 | static inline void rb_erase_init(struct rb_node *n, struct rb_root *root) | ||
| 12 | { | ||
| 13 | rb_erase(n, root); | ||
| 14 | RB_CLEAR_NODE(n); | ||
| 15 | } | ||
| 16 | #endif /* __TOOLS_LINUX_PERF_RBTREE_H */ | ||
diff --git a/tools/perf/util/include/linux/rbtree_augmented.h b/tools/perf/util/include/linux/rbtree_augmented.h deleted file mode 100644 index 9d6fcdf1788b..000000000000 --- a/tools/perf/util/include/linux/rbtree_augmented.h +++ /dev/null | |||
| @@ -1,2 +0,0 @@ | |||
| 1 | #include <stdbool.h> | ||
| 2 | #include "../../../../include/linux/rbtree_augmented.h" | ||
