Diffstat (limited to 'lib')
-rw-r--r-- | lib/bitrev.c     |   3
-rw-r--r-- | lib/div64.c      |  10
-rw-r--r-- | lib/radix-tree.c | 120
3 files changed, 74 insertions, 59 deletions
diff --git a/lib/bitrev.c b/lib/bitrev.c
index 989aff73f881..3956203456d4 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = {
 };
 EXPORT_SYMBOL_GPL(byte_rev_table);
 
-static __always_inline u16 bitrev16(u16 x)
+u16 bitrev16(u16 x)
 {
 	return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
 }
+EXPORT_SYMBOL(bitrev16);
 
 /**
  * bitrev32 - reverse the order of bits in a u32 value
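
The bitrev16() change drops __always_inline and exports the symbol, so the function now exists out of line and modules can call it directly. The composition itself is unchanged: reverse each byte with bitrev8() and swap the two bytes. A minimal user-space sketch of that composition follows; the open-coded bitrev8() below is an illustrative stand-in for the kernel's byte_rev_table lookup, not the kernel code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's table-driven bitrev8(): reverse the bits of one byte. */
static uint8_t bitrev8(uint8_t b)
{
	b = (uint8_t)((b & 0xf0) >> 4 | (b & 0x0f) << 4);
	b = (uint8_t)((b & 0xcc) >> 2 | (b & 0x33) << 2);
	b = (uint8_t)((b & 0xaa) >> 1 | (b & 0x55) << 1);
	return b;
}

/* Same composition as lib/bitrev.c: reverse each byte, then swap the bytes. */
static uint16_t bitrev16(uint16_t x)
{
	return (uint16_t)((bitrev8(x & 0xff) << 8) | bitrev8(x >> 8));
}

int main(void)
{
	/* 0x1234 -> 0x2c48: 0x34 reverses to 0x2c (new high byte), 0x12 to 0x48 (new low byte). */
	printf("%#06x\n", bitrev16(0x1234));
	return 0;
}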
diff --git a/lib/div64.c b/lib/div64.c
index bb5bd0c0f030..a111eb8de9cf 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
 #endif
 
 #endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+	return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
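
The new iter_div_u64_rem() is an out-of-line, exported wrapper around the inline __iter_div_u64_rem() for callers that want the non-inline symbol. As the comment says, the iterative approach only pays off when the dividend is not much bigger than the divisor, i.e. when the quotient is small. A user-space sketch of repeated-subtraction division in that spirit (an illustration of the idea, not the kernel's __iter_div_u64_rem source):

#include <stdint.h>
#include <stdio.h>

/*
 * Iterative div/mod by repeated subtraction: cost is proportional to the
 * quotient, so it only makes sense when the dividend is not expected to be
 * much bigger than the divisor (e.g. normalising a few seconds' worth of
 * nanoseconds).
 */
static uint32_t iter_div_u64_rem(uint64_t dividend, uint32_t divisor, uint64_t *remainder)
{
	uint32_t quotient = 0;

	while (dividend >= divisor) {
		dividend -= divisor;
		quotient++;
	}
	*remainder = dividend;
	return quotient;
}

int main(void)
{
	uint64_t rem;
	uint32_t sec = iter_div_u64_rem(3400000007ULL, 1000000000u, &rem);

	printf("%u sec, %llu ns\n", sec, (unsigned long long)rem);	/* 3 sec, 400000007 ns */
	return 0;
}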
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd521716ab1a..169a2f8dabcc 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 	return root->gfp_mask & __GFP_BITS_MASK;
 }
 
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	__set_bit(offset, node->tags[tag]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	__clear_bit(offset, node->tags[tag]);
+}
+
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+		int offset)
+{
+	return test_bit(offset, node->tags[tag]);
+}
+
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+	root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+	int idx;
+	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+		if (node->tags[tag][idx])
+			return 1;
+	}
+	return 0;
+}
 /*
  * This assumes that the caller has performed appropriate preallocation, and
  * that the caller has pinned this thread of control to the current CPU.
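
The tag_set/tag_clear/tag_get helpers operate on the per-node tag bitmaps, while the root_tag_* helpers keep per-root tag bits in the otherwise unused high bits of root->gfp_mask, above __GFP_BITS_SHIFT, so the root needs no extra field. A stand-alone sketch of that bit-packing trick (the shift value and struct below are simplified stand-ins, not the kernel's actual definitions):

#include <stdio.h>

/* Illustrative stand-in for __GFP_BITS_SHIFT: bits below it hold allocation flags. */
#define GFP_BITS_SHIFT	22
#define GFP_BITS_MASK	((1u << GFP_BITS_SHIFT) - 1)

struct root { unsigned int gfp_mask; };	/* simplified radix_tree_root */

static void root_tag_set(struct root *root, unsigned int tag)
{
	root->gfp_mask |= 1u << (tag + GFP_BITS_SHIFT);
}

static void root_tag_clear(struct root *root, unsigned int tag)
{
	root->gfp_mask &= ~(1u << (tag + GFP_BITS_SHIFT));
}

static int root_tag_get(struct root *root, unsigned int tag)
{
	return !!(root->gfp_mask & (1u << (tag + GFP_BITS_SHIFT)));
}

int main(void)
{
	struct root r = { .gfp_mask = 0xd0 };	/* low bits: allocation flags */

	root_tag_set(&r, 1);
	printf("tag1=%d tag0=%d flags=%#x\n",
	       root_tag_get(&r, 1), root_tag_get(&r, 0), r.gfp_mask & GFP_BITS_MASK);
	root_tag_clear(&r, 1);
	return 0;
}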
@@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
 {
 	struct radix_tree_node *node =
 			container_of(head, struct radix_tree_node, rcu_head);
+
+	/*
+	 * must only free zeroed nodes into the slab. radix_tree_shrink
+	 * can leave us with a non-NULL entry in the first slot, so clear
+	 * that here to make sure.
+	 */
+	tag_clear(node, 0, 0);
+	tag_clear(node, 1, 0);
+	node->slots[0] = NULL;
+	node->count = 0;
+
 	kmem_cache_free(radix_tree_node_cachep, node);
 }
 
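
The added comment captures the point of this hunk: nodes are assumed to come from a slab cache whose free objects sit in a fully zeroed, constructor-initialised state, and radix_tree_shrink() can leave slots[0] populated, so the RCU free path now scrubs the node before handing it back. A small user-space sketch of a constructor-backed object cache that relies on the same "free only clean objects" invariant (the cache and node layout are illustrative, not the kernel's):

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct node {
	unsigned int count;
	void *slots[64];
	unsigned long tags[2][1];
};

/* A tiny "slab" whose free objects are kept in constructed (all-zero) state. */
static struct node pool[8];
static struct node *freelist[8];
static int nfree;

static void cache_init(void)
{
	for (nfree = 0; nfree < 8; nfree++) {
		memset(&pool[nfree], 0, sizeof(pool[nfree]));	/* "constructor" runs once */
		freelist[nfree] = &pool[nfree];
	}
}

static struct node *node_alloc(void)
{
	/* Allocation is cheap precisely because freed nodes are already zeroed. */
	return nfree ? freelist[--nfree] : NULL;
}

static void node_free(struct node *node)
{
	/* The invariant this hunk enforces: only zeroed nodes go back to the cache. */
	assert(node->count == 0 && node->slots[0] == NULL && node->tags[0][0] == 0);
	freelist[nfree++] = node;
}

int main(void)
{
	struct node *n;

	cache_init();
	n = node_alloc();
	n->slots[0] = n;		/* node in use */
	n->count = 1;

	n->slots[0] = NULL;		/* what radix_tree_node_rcu_free() now does ... */
	n->count = 0;
	node_free(n);			/* ... so the object is immediately reusable */
	return 0;
}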
@@ -165,59 +227,6 @@ out:
 }
 EXPORT_SYMBOL(radix_tree_preload);
 
-static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	__set_bit(offset, node->tags[tag]);
-}
-
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	__clear_bit(offset, node->tags[tag]);
-}
-
-static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
-		int offset)
-{
-	return test_bit(offset, node->tags[tag]);
-}
-
-static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
-{
-	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
-{
-	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-static inline void root_tag_clear_all(struct radix_tree_root *root)
-{
-	root->gfp_mask &= __GFP_BITS_MASK;
-}
-
-static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
-{
-	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
-}
-
-/*
- * Returns 1 if any slot in the node has this tag set.
- * Otherwise returns 0.
- */
-static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
-{
-	int idx;
-	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
-		if (node->tags[tag][idx])
-			return 1;
-	}
-	return 0;
-}
-
 /*
  * Return the maximum key which can be store into a
  * radix tree with height HEIGHT.
@@ -930,11 +939,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 		newptr = radix_tree_ptr_to_indirect(newptr);
 		root->rnode = newptr;
 		root->height--;
-		/* must only free zeroed nodes into the slab */
-		tag_clear(to_free, 0, 0);
-		tag_clear(to_free, 1, 0);
-		to_free->slots[0] = NULL;
-		to_free->count = 0;
 		radix_tree_node_free(to_free);
 	}
 }