author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-17 11:58:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-17 11:58:04 -0400
commit     57a8ec387e1441ea5e1232bc0749fb99a8cba7e7 (patch)
tree       b5fb03fc6bc5754de8b5b1f8b0e4f36d67c8315c /include/linux/rbtree.h
parent     0a8ad0ffa4d80a544f6cbff703bf6394339afcdf (diff)
parent     43e11fa2d1d3b6e35629fa556eb7d571edba2010 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
"VM:
- z3fold fixes and enhancements by Henry Burns and Vitaly Wool
- more accurate reclaimed slab caches calculations by Yafang Shao
- fix MAP_UNINITIALIZED UAPI symbol to not depend on config, by
Christoph Hellwig
- !CONFIG_MMU fixes by Christoph Hellwig
- new novmcoredd parameter to omit device dumps from vmcore, by
Kairui Song
- new test_meminit module for testing heap and pagealloc
initialization, by Alexander Potapenko
- ioremap improvements for huge mappings, by Anshuman Khandual
- generalize kprobe page fault handling, by Anshuman Khandual
- device-dax hotplug fixes and improvements, by Pavel Tatashin
- enable synchronous DAX fault on powerpc, by Aneesh Kumar K.V
- add pte_devmap() support for arm64, by Robin Murphy
- unify locked_vm accounting with a helper, by Daniel Jordan
- several misc fixes
core/lib:
- new typeof_member() macro including some users, by Alexey Dobriyan
- make BIT() and GENMASK() available in asm, by Masahiro Yamada
- changed LIST_POISON2 on x86_64 to 0xdead000000000122 for better
code generation, by Alexey Dobriyan
- rbtree code size optimizations, by Michel Lespinasse
- convert struct pid count to refcount_t, by Joel Fernandes
get_maintainer.pl:
- add --no-moderated switch to skip moderated ML's, by Joe Perches
misc:
- ptrace PTRACE_GET_SYSCALL_INFO interface
- coda updates
- gdb scripts, various"
[ Using merge message suggestion from Vlastimil Babka, with some editing - Linus ]
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (100 commits)
fs/select.c: use struct_size() in kmalloc()
mm: add account_locked_vm utility function
arm64: mm: implement pte_devmap support
mm: introduce ARCH_HAS_PTE_DEVMAP
mm: clean up is_device_*_page() definitions
mm/mmap: move common defines to mman-common.h
mm: move MAP_SYNC to asm-generic/mman-common.h
device-dax: "Hotremove" persistent memory that is used like normal RAM
mm/hotplug: make remove_memory() interface usable
device-dax: fix memory and resource leak if hotplug fails
include/linux/lz4.h: fix spelling and copy-paste errors in documentation
ipc/mqueue.c: only perform resource calculation if user valid
include/asm-generic/bug.h: fix "cut here" for WARN_ON for __WARN_TAINT architectures
scripts/gdb: add helpers to find and list devices
scripts/gdb: add lx-genpd-summary command
drivers/pps/pps.c: clear offset flags in PPS_SETPARAMS ioctl
kernel/pid.c: convert struct pid count to refcount_t
drivers/rapidio/devices/rio_mport_cdev.c: NUL terminate some strings
select: shift restore_saved_sigmask_unless() into poll_select_copy_remaining()
select: change do_poll() to return -ERESTARTNOHAND rather than -EINTR
...
Diffstat (limited to 'include/linux/rbtree.h')
-rw-r--r--  include/linux/rbtree.h  70
1 file changed, 46 insertions, 24 deletions
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index e6337fce08f2..1fd61a9af45c 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -32,25 +32,9 @@ struct rb_root {
 	struct rb_node *rb_node;
 };
 
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
-	struct rb_root rb_root;
-	struct rb_node *rb_leftmost;
-};
-
 #define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))
 
 #define RB_ROOT	(struct rb_root) { NULL, }
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
 #define	rb_entry(ptr, type, member) container_of(ptr, type, member)
 
 #define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)
@@ -72,12 +56,6 @@ extern struct rb_node *rb_prev(const struct rb_node *);
 extern struct rb_node *rb_first(const struct rb_root *);
 extern struct rb_node *rb_last(const struct rb_root *);
 
-extern void rb_insert_color_cached(struct rb_node *,
-				   struct rb_root_cached *, bool);
-extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
-/* Same as rb_first(), but O(1) */
-#define rb_first_cached(root) (root)->rb_leftmost
-
 /* Postorder iteration - always visit the parent after its children */
 extern struct rb_node *rb_first_postorder(const struct rb_root *);
 extern struct rb_node *rb_next_postorder(const struct rb_node *);
@@ -87,8 +65,6 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 			    struct rb_root *root);
 extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
 				struct rb_root *root);
-extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
-				   struct rb_root_cached *root);
 
 static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
 				struct rb_node **rb_link)
@@ -136,4 +112,50 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
 					typeof(*pos), field); 1; }); \
 	     pos = n)
 
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+	struct rb_root rb_root;
+	struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+/* Same as rb_first(), but O(1) */
+#define rb_first_cached(root) (root)->rb_leftmost
+
+static inline void rb_insert_color_cached(struct rb_node *node,
+					  struct rb_root_cached *root,
+					  bool leftmost)
+{
+	if (leftmost)
+		root->rb_leftmost = node;
+	rb_insert_color(node, &root->rb_root);
+}
+
+static inline void rb_erase_cached(struct rb_node *node,
+				   struct rb_root_cached *root)
+{
+	if (root->rb_leftmost == node)
+		root->rb_leftmost = rb_next(node);
+	rb_erase(node, &root->rb_root);
+}
+
+static inline void rb_replace_node_cached(struct rb_node *victim,
+					  struct rb_node *new,
+					  struct rb_root_cached *root)
+{
+	if (root->rb_leftmost == victim)
+		root->rb_leftmost = new;
+	rb_replace_node(victim, new, &root->rb_root);
+}
+
 #endif	/* _LINUX_RBTREE_H */
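
For context, the leftmost-cached variant shown above is used like the plain rbtree API, except that the tree root is a struct rb_root_cached and the caller tells rb_insert_color_cached() whether the newly linked node became the leftmost one, which is what keeps rb_first_cached() O(1). The following is a minimal usage sketch; struct sample_node, sample_insert() and sample_first() are illustrative names invented here, not part of this patch or of the kernel tree:

#include <linux/rbtree.h>

/* Illustrative container structure keyed by an unsigned long. */
struct sample_node {
	struct rb_node node;
	unsigned long key;
};

static struct rb_root_cached sample_root = RB_ROOT_CACHED;

/*
 * Standard descend-then-link insertion: walk down to the correct leaf
 * position, remember whether we only ever went left (so the new node is
 * the leftmost), then let rb_insert_color_cached() rebalance and update
 * rb_leftmost in one call.
 */
static void sample_insert(struct sample_node *new)
{
	struct rb_node **link = &sample_root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct sample_node *cur;

		parent = *link;
		cur = rb_entry(parent, struct sample_node, node);
		if (new->key < cur->key) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color_cached(&new->node, &sample_root, leftmost);
}

/* O(1) lookup of the smallest key via the cached leftmost pointer. */
static struct sample_node *sample_first(void)
{
	struct rb_node *first = rb_first_cached(&sample_root);

	return first ? rb_entry(first, struct sample_node, node) : NULL;
}

Since rb_insert_color_cached(), rb_erase_cached() and rb_replace_node_cached() are now static inline wrappers around the uncached primitives rather than separate exported functions, callers no longer pull in a second out-of-line copy of the rebalancing code, which is consistent with the "rbtree code size optimizations" noted in the merge message.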