author     Matthew Wilcox <willy@linux.intel.com>           2016-05-20 20:03:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-05-20 20:58:30 -0400
commit     2fcd9005cc03ab09ea2a940515ed728d43df66c4 (patch)
tree       dae10dab206adfe5d9310b697ac7a6fbc8a6042d /lib
parent     b76ba4af4ddd6a06f7f65769e7be1bc56556cdf5 (diff)
radix-tree: miscellaneous fixes
Typos, whitespace, grammar, line length, using the correct types, etc.
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Jan Kara <jack@suse.com>
Cc: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/radix-tree.c | 70
1 file changed, 36 insertions, 34 deletions
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1e75813b9f34..75944e42e4a0 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -66,7 +66,7 @@ static struct kmem_cache *radix_tree_node_cachep;
  * Per-cpu pool of preloaded nodes
  */
 struct radix_tree_preload {
-	int nr;
+	unsigned nr;
 	/* nodes->private_data points to next preallocated node */
 	struct radix_tree_node *nodes;
 };
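For context, and not part of this patch: this pool is consumed by radix_tree_node_alloc(), which pops nodes off the private_data chain roughly as follows (paraphrased from the 4.6-era source):

	struct radix_tree_preload *rtp = this_cpu_ptr(&radix_tree_preloads);
	struct radix_tree_node *ret = NULL;

	if (rtp->nr) {
		ret = rtp->nodes;		/* pop the head of the chain */
		rtp->nodes = ret->private_data;	/* next preallocated node */
		rtp->nr--;
	}

Since nr only ever counts preloaded nodes, unsigned is the natural type for it.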
@@ -147,7 +147,7 @@ static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
 	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
 }
 
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
 {
 	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
 }
@@ -159,7 +159,7 @@ static inline void root_tag_clear_all(struct radix_tree_root *root)
 
 static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
 {
-	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+	return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
 }
 
 static inline unsigned root_tags_get(struct radix_tree_root *root)
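A worked example of the encoding these helpers manipulate, not part of the patch: the root's tags are stashed in the otherwise-unused high bits of root->gfp_mask, above the genuine GFP flags. Assuming __GFP_BITS_SHIFT is 26 (its value in this era), tag 1 occupies bit 27:

	root_tag_set(root, 1);		/* gfp_mask |= 1 << 27 */
	BUG_ON(!root_tag_get(root, 1));	/* (__force int)gfp_mask & (1 << 27) */
	root_tag_clear(root, 1);	/* gfp_mask &= ~(1 << 27) */

The cast change makes root_tag_get()'s cast match its int return type.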
@@ -173,7 +173,7 @@ static inline unsigned root_tags_get(struct radix_tree_root *root)
  */
 static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
 {
-	int idx;
+	unsigned idx;
 	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
 		if (node->tags[tag][idx])
 			return 1;
@@ -273,9 +273,9 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 	gfp_t gfp_mask = root_gfp_mask(root);
 
 	/*
-	 * Preload code isn't irq safe and it doesn't make sence to use
-	 * preloading in the interrupt anyway as all the allocations have to
-	 * be atomic. So just do normal allocation when in interrupt.
+	 * Preload code isn't irq safe and it doesn't make sense to use
+	 * preloading during an interrupt anyway as all the allocations have
+	 * to be atomic. So just do normal allocation when in interrupt.
 	 */
 	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
 		struct radix_tree_preload *rtp;
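The comment refers to the standard preload pattern, which is why it cannot run in interrupt context. A sketch, not from this patch (mytree and mylock are caller-supplied): radix_tree_preload() may sleep to fill the per-cpu pool and, on success, returns with preemption disabled so the pool cannot be stolen before the insert.

	if (radix_tree_preload(GFP_KERNEL))	/* may sleep; -ENOMEM on failure */
		return -ENOMEM;
	spin_lock(&mylock);
	err = radix_tree_insert(&mytree, index, item);
	spin_unlock(&mylock);
	radix_tree_preload_end();		/* re-enables preemption */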
@@ -448,7 +448,6 @@ static unsigned radix_tree_load_root(struct radix_tree_root *root,
 static int radix_tree_extend(struct radix_tree_root *root,
 				unsigned long index)
 {
-	struct radix_tree_node *node;
 	struct radix_tree_node *slot;
 	unsigned int height;
 	int tag;
@@ -465,7 +464,9 @@ static int radix_tree_extend(struct radix_tree_root *root,
 
 	do {
 		unsigned int newheight;
-		if (!(node = radix_tree_node_alloc(root)))
+		struct radix_tree_node *node = radix_tree_node_alloc(root);
+
+		if (!node)
 			return -ENOMEM;
 
 		/* Propagate the aggregated tag info into the new root */
@@ -542,7 +543,8 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
 	while (shift > order) {
 		if (slot == NULL) {
 			/* Have to add a child node. */
-			if (!(slot = radix_tree_node_alloc(root)))
+			slot = radix_tree_node_alloc(root);
+			if (!slot)
 				return -ENOMEM;
 			slot->path = height;
 			slot->parent = node;
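This hunk and the previous one make the same mechanical change: hoisting an assignment out of an if condition, a construct checkpatch warns about. In general form (alloc_thing() is a hypothetical stand-in):

	/* before */
	if (!(p = alloc_thing()))
		return -ENOMEM;

	/* after */
	p = alloc_thing();
	if (!p)
		return -ENOMEM;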
@@ -722,13 +724,13 @@ EXPORT_SYMBOL(radix_tree_lookup);
  * radix_tree_tag_set - set a tag on a radix tree node
  * @root:	radix tree root
  * @index:	index key
  * @tag:	tag index
  *
  * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
  * corresponding to @index in the radix tree.  From
  * the root all the way down to the leaf node.
  *
  * Returns the address of the tagged item.  Setting a tag on a not-present
  * item is a bug.
  */
 void *radix_tree_tag_set(struct radix_tree_root *root,
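A minimal usage sketch for the kernel-doc above, not part of this patch (mytree and mylock are hypothetical): the caller must hold the tree lock, and the item at @index must already be present.

	spin_lock_irq(&mylock);
	radix_tree_insert(&mytree, index, item);	/* item present first */
	radix_tree_tag_set(&mytree, index, 0);		/* tag 0 < RADIX_TREE_MAX_TAGS */
	spin_unlock_irq(&mylock);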
@@ -767,11 +769,11 @@ EXPORT_SYMBOL(radix_tree_tag_set);
  * radix_tree_tag_clear - clear a tag on a radix tree node
  * @root:	radix tree root
  * @index:	index key
  * @tag:	tag index
  *
  * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
- * corresponding to @index in the radix tree.  If
- * this causes the leaf node to have no tags set then clear the tag in the
+ * corresponding to @index in the radix tree.  If this causes
+ * the leaf node to have no tags set then clear the tag in the
  * next-to-leaf node, etc.
  *
  * Returns the address of the tagged item on success, else NULL.  ie:
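Continuing the sketch from the previous hunk: because the clear propagates upward, a subsequent radix_tree_tag_get() on the same index returns 0.

	radix_tree_tag_clear(&mytree, index, 0);
	BUG_ON(radix_tree_tag_get(&mytree, index, 0));	/* cleared at every level */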
@@ -829,7 +831,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
  * radix_tree_tag_get - get a tag on a radix tree node
  * @root:	radix tree root
  * @index:	index key
  * @tag:	tag index (< RADIX_TREE_MAX_TAGS)
  *
  * Return values:
  *
@@ -1035,7 +1037,7 @@ EXPORT_SYMBOL(radix_tree_next_chunk);
  * set is outside the range we are scanning. This reults in dangling tags and
  * can lead to problems with later tag operations (e.g. livelocks on lookups).
  *
- * The function returns number of leaves where the tag was set and sets
+ * The function returns the number of leaves where the tag was set and sets
  * *first_indexp to the first unscanned index.
  * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
  * be prepared to handle that.
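The in-tree caller of this era, tag_pages_for_writeback() in mm/page-writeback.c, shows how both the return value and the wrap warning are meant to be handled (paraphrased; mapping, start and end come from that function's context):

	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		cond_resched();
		/* checking 'start' handles the wrap when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);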
@@ -1153,9 +1155,10 @@ EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
  *
  * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
  * rcu_read_lock. In this case, rather than the returned results being
- * an atomic snapshot of the tree at a single point in time, the semantics
- * of an RCU protected gang lookup are as though multiple radix_tree_lookups
- * have been issued in individual locks, and results stored in 'results'.
+ * an atomic snapshot of the tree at a single point in time, the
+ * semantics of an RCU protected gang lookup are as though multiple
+ * radix_tree_lookups have been issued in individual locks, and results
+ * stored in 'results'.
  */
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
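A sketch of the RCU semantics described above, not from this patch (mytree is hypothetical): each returned pointer was valid at some instant during the walk, but a concurrent deleter may have removed it since, so callers must revalidate before use.

	void *results[16];
	unsigned int i, nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup(&mytree, results, first_index,
				    ARRAY_SIZE(results));
	for (i = 0; i < nr; i++) {
		/* like separate radix_tree_lookup()s: revalidate each item */
	}
	rcu_read_unlock();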
@@ -1460,7 +1463,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 	 * their slot to become empty sooner or later.
 	 *
 	 * For example, lockless pagecache will look up a slot, deref
-	 * the page pointer, and if the page is 0 refcount it means it
+	 * the page pointer, and if the page has 0 refcount it means it
 	 * was concurrently deleted from pagecache so try the deref
 	 * again. Fortunately there is already a requirement for logic
 	 * to retry the entire slot lookup -- the indirect pointer
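The retry logic this comment describes lives in the page cache lookup path. A simplified sketch with 4.6-era names (exceptional-entry and slot-retry details elided):

	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page && !page_cache_get_speculative(page))
		goto repeat;	/* refcount hit 0: page is being freed */
	rcu_read_unlock();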
@@ -1649,24 +1652,23 @@ static __init void radix_tree_init_maxindex(void)
 }
 
 static int radix_tree_callback(struct notifier_block *nfb,
-				unsigned long action,
-				void *hcpu)
+				unsigned long action, void *hcpu)
 {
 	int cpu = (long)hcpu;
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
 
-	/* Free per-cpu pool of perloaded nodes */
+	/* Free per-cpu pool of preloaded nodes */
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		rtp = &per_cpu(radix_tree_preloads, cpu);
 		while (rtp->nr) {
 			node = rtp->nodes;
 			rtp->nodes = node->private_data;
 			kmem_cache_free(radix_tree_node_cachep, node);
 			rtp->nr--;
 		}
 	}
 	return NOTIFY_OK;
 }
 
 void __init radix_tree_init(void)
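The diff ends inside radix_tree_init(). For reference, and not part of this patch, the full function in this era read roughly as follows; it is where the hotplug callback above gets registered:

void __init radix_tree_init(void)
{
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	radix_tree_init_maxindex();
	hotcpu_notifier(radix_tree_callback, 0);
}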