author	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-28 23:29:41 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-28 23:29:41 -0500
commit	cf393195c3ba5d4c0a8e237eb00f7ef104876ee5 (patch)
tree	8aa515ca0e0c00bffbc8dccb9d36ea319f251a12
parent	5ecc5ac215bc4d88243a2f4909e70ccc1bda710f (diff)
parent	c6ce3e2fe3dacda5e8afb0036c814ae9c3fee9b9 (diff)
Merge branch 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax
Pull IDR rewrite from Matthew Wilcox:
 "The most significant part of the following is the patch to rewrite
  the IDR & IDA to be clients of the radix tree.  But there's much
  more, including an enhancement of the IDA to be significantly more
  space efficient, an IDR & IDA test suite, some improvements to the
  IDR API (and driver changes to take advantage of those improvements),
  several improvements to the radix tree test suite and RCU
  annotations.

  The IDR & IDA rewrite had a good spin in linux-next and Andrew's tree
  for most of the last cycle.  Coupled with the IDR test suite, I feel
  pretty confident that any remaining bugs are quite hard to hit.  0-day
  did a great job of watching my git tree and pointing out problems; as
  it hit them, I added new test-cases to be sure not to be caught the
  same way twice"

Willy goes on to expand a bit on the IDR rewrite rationale:
 "The radix tree and the IDR use very similar data structures.

  Merging the two codebases lets us share the memory allocation pools,
  and results in a net deletion of 500 lines of code.  It also opens up
  the possibility of exposing more of the features of the radix tree to
  users of the IDR (and I have some interesting patches along those
  lines waiting for 4.12)

  It also shrinks the size of the 'struct idr' from 40 bytes to 24 which
  will shrink a fair few data structures that embed an IDR"

* 'idr-4.11' of git://git.infradead.org/users/willy/linux-dax: (32 commits)
  radix tree test suite: Add config option for map shift
  idr: Add missing __rcu annotations
  radix-tree: Fix __rcu annotations
  radix-tree: Add rcu_dereference and rcu_assign_pointer calls
  radix tree test suite: Run iteration tests for longer
  radix tree test suite: Fix split/join memory leaks
  radix tree test suite: Fix leaks in regression2.c
  radix tree test suite: Fix leaky tests
  radix tree test suite: Enable address sanitizer
  radix_tree_iter_resume: Fix out of bounds error
  radix-tree: Store a pointer to the root in each node
  radix-tree: Chain preallocated nodes through ->parent
  radix tree test suite: Dial down verbosity with -v
  radix tree test suite: Introduce kmalloc_verbose
  idr: Return the deleted entry from idr_remove
  radix tree test suite: Build separate binaries for some tests
  ida: Use exceptional entries for small IDAs
  ida: Move ida_bitmap to a percpu variable
  Reimplement IDR and IDA using the radix tree
  radix-tree: Add radix_tree_iter_delete
  ...
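Among the API changes pulled in here, "idr: Return the deleted entry from idr_remove" is the one the driver conversions below lean on: idr_remove() now hands back the entry it removed, so the old idr_find()-then-idr_remove() sequence collapses into a single call. A minimal sketch of the pattern (the frames/frames_lock names are made up for illustration, not taken from any driver in this series):

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(frames);
	static DEFINE_SPINLOCK(frames_lock);

	/* Fetch and drop an entry in one step; returns NULL if the id is unused. */
	static void *frames_fetch_and_remove(int id)
	{
		void *entry;

		spin_lock(&frames_lock);
		entry = idr_remove(&frames, id);	/* was: idr_find() + idr_remove() */
		spin_unlock(&frames_lock);

		return entry;
	}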
-rw-r--r--  drivers/atm/nicstar.c | 5
-rw-r--r--  drivers/block/drbd/drbd_main.c | 6
-rw-r--r--  drivers/firewire/core-cdev.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 10
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/txrx.c | 4
-rw-r--r--  drivers/target/target_core_user.c | 4
-rw-r--r--  include/linux/idr.h | 148
-rw-r--r--  include/linux/radix-tree.h | 179
-rw-r--r--  init/main.c | 3
-rw-r--r--  lib/Makefile | 3
-rw-r--r--  lib/idr.c | 1242
-rw-r--r--  lib/radix-tree.c | 761
-rw-r--r--  mm/workingset.c | 6
-rw-r--r--  net/mac80211/status.c | 4
-rw-r--r--  tools/include/asm-generic/bitops/atomic.h | 3
-rw-r--r--  tools/include/asm/bug.h | 8
-rw-r--r--  tools/include/linux/bitmap.h | 1
-rw-r--r--  tools/include/linux/bitops.h | 1
-rw-r--r--  tools/include/linux/compiler.h | 4
-rw-r--r--  tools/include/linux/spinlock.h | 5
-rw-r--r--  tools/testing/radix-tree/.gitignore | 4
-rw-r--r--  tools/testing/radix-tree/Makefile | 46
-rw-r--r--  tools/testing/radix-tree/benchmark.c | 6
-rw-r--r--  tools/testing/radix-tree/generated/autoconf.h | 2
-rw-r--r--  tools/testing/radix-tree/idr-test.c | 444
-rw-r--r--  tools/testing/radix-tree/iteration_check.c | 2
-rw-r--r--  tools/testing/radix-tree/linux.c | 39
-rw-r--r--  tools/testing/radix-tree/linux/bitops.h | 160
-rw-r--r--  tools/testing/radix-tree/linux/bitops/__ffs.h | 43
-rw-r--r--  tools/testing/radix-tree/linux/bitops/ffs.h | 41
-rw-r--r--  tools/testing/radix-tree/linux/bitops/ffz.h | 12
-rw-r--r--  tools/testing/radix-tree/linux/bitops/find.h | 13
-rw-r--r--  tools/testing/radix-tree/linux/bitops/fls.h | 41
-rw-r--r--  tools/testing/radix-tree/linux/bitops/fls64.h | 14
-rw-r--r--  tools/testing/radix-tree/linux/bitops/hweight.h | 11
-rw-r--r--  tools/testing/radix-tree/linux/bitops/le.h | 53
-rw-r--r--  tools/testing/radix-tree/linux/bitops/non-atomic.h | 110
-rw-r--r--  tools/testing/radix-tree/linux/export.h | 2
-rw-r--r--  tools/testing/radix-tree/linux/gfp.h | 10
-rw-r--r--  tools/testing/radix-tree/linux/idr.h | 1
-rw-r--r--  tools/testing/radix-tree/linux/init.h | 2
-rw-r--r--  tools/testing/radix-tree/linux/kernel.h | 55
-rw-r--r--  tools/testing/radix-tree/linux/mempool.h | 16
-rw-r--r--  tools/testing/radix-tree/linux/percpu.h | 5
-rw-r--r--  tools/testing/radix-tree/linux/preempt.h | 10
-rw-r--r--  tools/testing/radix-tree/linux/radix-tree.h | 25
-rw-r--r--  tools/testing/radix-tree/linux/types.h | 23
-rw-r--r--  tools/testing/radix-tree/main.c | 53
-rw-r--r--  tools/testing/radix-tree/multiorder.c | 39
-rw-r--r--  tools/testing/radix-tree/regression1.c | 4
-rw-r--r--  tools/testing/radix-tree/regression2.c | 10
-rw-r--r--  tools/testing/radix-tree/regression3.c | 28
-rw-r--r--  tools/testing/radix-tree/tag_check.c | 22
-rw-r--r--  tools/testing/radix-tree/test.c | 28
-rw-r--r--  tools/testing/radix-tree/test.h | 2
56 files changed, 1703 insertions(+), 2077 deletions(-)
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index cb28579e8a94..d879f3bca107 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -1980,13 +1980,12 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
 	card->lbfqc = ns_stat_lfbqc_get(stat);
 
 	id = le32_to_cpu(rsqe->buffer_handle);
-	skb = idr_find(&card->idr, id);
+	skb = idr_remove(&card->idr, id);
 	if (!skb) {
 		RXPRINTK(KERN_ERR
 			 "nicstar%d: idr_find() failed!\n", card->index);
-			 "nicstar%d: idr_find() failed!\n", card->index);
+			 "nicstar%d: skb not found!\n", card->index);
 		return;
 	}
-	idr_remove(&card->idr, id);
 	dma_sync_single_for_cpu(&card->pcidev->dev,
 				NS_PRV_DMA(skb),
 				(NS_PRV_BUFTYPE(skb) == BUF_SM
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 615e5b5178a0..116509852a34 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2915,11 +2915,9 @@ out_idr_remove_vol:
 	idr_remove(&connection->peer_devices, vnr);
 out_idr_remove_from_resource:
 	for_each_connection(connection, resource) {
-		peer_device = idr_find(&connection->peer_devices, vnr);
-		if (peer_device) {
-			idr_remove(&connection->peer_devices, vnr);
+		peer_device = idr_remove(&connection->peer_devices, vnr);
+		if (peer_device)
 			kref_put(&connection->kref, drbd_destroy_connection);
-		}
 	}
 	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
 		list_del(&peer_device->peer_devices);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index aee149bdf4c0..a301fcf46e88 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1307,8 +1307,7 @@ static void iso_resource_work(struct work_struct *work)
 	 */
 	if (r->todo == ISO_RES_REALLOC && !success &&
 	    !client->in_shutdown &&
-	    idr_find(&client->resource_idr, r->resource.handle)) {
-		idr_remove(&client->resource_idr, r->resource.handle);
+	    idr_remove(&client->resource_idr, r->resource.handle)) {
 		client_put(client);
 		free = true;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index c02db01f6583..0218cea6be4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -70,10 +70,10 @@ static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
 	struct amdgpu_bo_list *list;
 
 	mutex_lock(&fpriv->bo_list_lock);
-	list = idr_find(&fpriv->bo_list_handles, id);
+	list = idr_remove(&fpriv->bo_list_handles, id);
 	if (list) {
+		/* Another user may have a reference to this list still */
 		mutex_lock(&list->lock);
-		idr_remove(&fpriv->bo_list_handles, id);
 		mutex_unlock(&list->lock);
 		amdgpu_bo_list_free(list);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 400c66ba4c6b..cf0500671353 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -135,15 +135,11 @@ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
 	struct amdgpu_ctx *ctx;
 
 	mutex_lock(&mgr->lock);
-	ctx = idr_find(&mgr->ctx_handles, id);
-	if (ctx) {
-		idr_remove(&mgr->ctx_handles, id);
+	ctx = idr_remove(&mgr->ctx_handles, id);
+	if (ctx)
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-		mutex_unlock(&mgr->lock);
-		return 0;
-	}
 	mutex_unlock(&mgr->lock);
-	return -EINVAL;
+	return ctx ? 0 : -EINVAL;
 }
 
 static int amdgpu_ctx_query(struct amdgpu_device *adev,
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index abdd0cf710bf..fac28bd8fbee 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -346,9 +346,7 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
 		return;
 
 	spin_lock_irqsave(&priv->ack_status_lock, flags);
-	ack_skb = idr_find(&priv->ack_status_frames, tx_status->tx_token_id);
-	if (ack_skb)
-		idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
+	ack_skb = idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
 	spin_unlock_irqrestore(&priv->ack_status_lock, flags);
 
 	if (ack_skb) {
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 5c1cb2df3a54..c3adefe95e50 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -642,9 +642,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
 		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
 
 		spin_lock(&udev->commands_lock);
-		cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
-		if (cmd)
-			idr_remove(&udev->commands, cmd->cmd_id);
+		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
 		spin_unlock(&udev->commands_lock);
 
 		if (!cmd) {
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 3c01b89aed67..bf70b3ef0a07 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -12,47 +12,29 @@
 #ifndef __IDR_H__
 #define __IDR_H__
 
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/rcupdate.h>
+#include <linux/radix-tree.h>
+#include <linux/gfp.h>
+#include <linux/percpu.h>
+
+struct idr {
+	struct radix_tree_root	idr_rt;
+	unsigned int		idr_next;
+};
 
 /*
- * Using 6 bits at each layer allows us to allocate 7 layers out of each page.
- * 8 bits only gave us 3 layers out of every pair of pages, which is less
- * efficient except for trees with a largest element between 192-255 inclusive.
+ * The IDR API does not expose the tagging functionality of the radix tree
+ * to users.  Use tag 0 to track whether a node has free space below it.
  */
-#define IDR_BITS 6
-#define IDR_SIZE (1 << IDR_BITS)
-#define IDR_MASK ((1 << IDR_BITS)-1)
-
-struct idr_layer {
-	int			prefix;	/* the ID prefix of this idr_layer */
-	int			layer;	/* distance from leaf */
-	struct idr_layer __rcu	*ary[1<<IDR_BITS];
-	int			count;	/* When zero, we can release it */
-	union {
-		/* A zero bit means "space here" */
-		DECLARE_BITMAP(bitmap, IDR_SIZE);
-		struct rcu_head		rcu_head;
-	};
-};
+#define IDR_FREE	0
 
-struct idr {
-	struct idr_layer __rcu	*hint;	/* the last layer allocated from */
-	struct idr_layer __rcu	*top;
-	int			layers;	/* only valid w/o concurrent changes */
-	int			cur;	/* current pos for cyclic allocation */
-	spinlock_t		lock;
-	int			id_free_cnt;
-	struct idr_layer	*id_free;
-};
+/* Set the IDR flag and the IDR_FREE tag */
+#define IDR_RT_MARKER		((__force gfp_t)(3 << __GFP_BITS_SHIFT))
 
-#define IDR_INIT(name)							\
+#define IDR_INIT							\
 {									\
-	.lock = __SPIN_LOCK_UNLOCKED(name.lock),			\
+	.idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER)			\
 }
-#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
+#define DEFINE_IDR(name)	struct idr name = IDR_INIT
 
 /**
  * idr_get_cursor - Return the current position of the cyclic allocator
@@ -62,9 +44,9 @@ struct idr {
  * idr_alloc_cyclic() if it is free (otherwise the search will start from
  * this position).
  */
-static inline unsigned int idr_get_cursor(struct idr *idr)
+static inline unsigned int idr_get_cursor(const struct idr *idr)
 {
-	return READ_ONCE(idr->cur);
+	return READ_ONCE(idr->idr_next);
 }
 
 /**
@@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr)
  */
 static inline void idr_set_cursor(struct idr *idr, unsigned int val)
 {
-	WRITE_ONCE(idr->cur, val);
+	WRITE_ONCE(idr->idr_next, val);
 }
 
 /**
@@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
  * period).
  */
 
-/*
- * This is what we export.
- */
-
-void *idr_find_slowpath(struct idr *idp, int id);
 void idr_preload(gfp_t gfp_mask);
-int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
-int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
-int idr_for_each(struct idr *idp,
+int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
+int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
+int idr_for_each(const struct idr *,
 		 int (*fn)(int id, void *p, void *data), void *data);
-void *idr_get_next(struct idr *idp, int *nextid);
-void *idr_replace(struct idr *idp, void *ptr, int id);
-void idr_remove(struct idr *idp, int id);
-void idr_destroy(struct idr *idp);
-void idr_init(struct idr *idp);
-bool idr_is_empty(struct idr *idp);
+void *idr_get_next(struct idr *, int *nextid);
+void *idr_replace(struct idr *, void *, int id);
+void idr_destroy(struct idr *);
+
+static inline void *idr_remove(struct idr *idr, int id)
+{
+	return radix_tree_delete_item(&idr->idr_rt, id, NULL);
+}
+
+static inline void idr_init(struct idr *idr)
+{
+	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
+	idr->idr_next = 0;
+}
+
+static inline bool idr_is_empty(const struct idr *idr)
+{
+	return radix_tree_empty(&idr->idr_rt) &&
+		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
+}
 
 /**
  * idr_preload_end - end preload section started with idr_preload()
@@ -137,19 +128,14 @@ static inline void idr_preload_end(void)
  * This function can be called under rcu_read_lock(), given that the leaf
  * pointers lifetimes are correctly managed.
  */
-static inline void *idr_find(struct idr *idr, int id)
+static inline void *idr_find(const struct idr *idr, int id)
 {
-	struct idr_layer *hint = rcu_dereference_raw(idr->hint);
-
-	if (hint && (id & ~IDR_MASK) == hint->prefix)
-		return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
-
-	return idr_find_slowpath(idr, id);
+	return radix_tree_lookup(&idr->idr_rt, id);
 }
 
 /**
  * idr_for_each_entry - iterate over an idr's elements of a given type
- * @idp: idr handle
+ * @idr: idr handle
  * @entry: the type * to use as cursor
  * @id: id entry's key
  *
@@ -157,57 +143,60 @@ static inline void *idr_find(struct idr *idr, int id)
  * after normal terminatinon @entry is left with the value NULL.  This
  * is convenient for a "not found" value.
  */
-#define idr_for_each_entry(idp, entry, id)			\
-	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
+#define idr_for_each_entry(idr, entry, id)			\
+	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
 
 /**
- * idr_for_each_entry - continue iteration over an idr's elements of a given type
- * @idp: idr handle
+ * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
+ * @idr: idr handle
  * @entry: the type * to use as cursor
  * @id: id entry's key
  *
  * Continue to iterate over list of given type, continuing after
  * the current position.
  */
-#define idr_for_each_entry_continue(idp, entry, id)			\
-	for ((entry) = idr_get_next((idp), &(id));			\
+#define idr_for_each_entry_continue(idr, entry, id)			\
+	for ((entry) = idr_get_next((idr), &(id));			\
 	     entry;							\
-	     ++id, (entry) = idr_get_next((idp), &(id)))
+	     ++id, (entry) = idr_get_next((idr), &(id)))
 
 /*
  * IDA - IDR based id allocator, use when translation from id to
  * pointer isn't necessary.
- *
- * IDA_BITMAP_LONGS is calculated to be one less to accommodate
- * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
  */
 #define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
-#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long) - 1)
+#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
 #define IDA_BITMAP_BITS 	(IDA_BITMAP_LONGS * sizeof(long) * 8)
 
 struct ida_bitmap {
-	long			nr_busy;
 	unsigned long		bitmap[IDA_BITMAP_LONGS];
 };
 
+DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
+
 struct ida {
-	struct idr		idr;
-	struct ida_bitmap	*free_bitmap;
+	struct radix_tree_root	ida_rt;
 };
 
-#define IDA_INIT(name)		{ .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
-#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)
+#define IDA_INIT	{						\
+	.ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT),		\
+}
+#define DEFINE_IDA(name)	struct ida name = IDA_INIT
 
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
 void ida_remove(struct ida *ida, int id);
 void ida_destroy(struct ida *ida);
-void ida_init(struct ida *ida);
 
 int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
 		   gfp_t gfp_mask);
 void ida_simple_remove(struct ida *ida, unsigned int id);
 
+static inline void ida_init(struct ida *ida)
+{
+	INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
+}
+
 /**
  * ida_get_new - allocate new ID
  * @ida: idr handle
@@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
 	return ida_get_new_above(ida, 0, p_id);
 }
 
-static inline bool ida_is_empty(struct ida *ida)
+static inline bool ida_is_empty(const struct ida *ida)
 {
-	return idr_is_empty(&ida->idr);
+	return radix_tree_empty(&ida->ida_rt);
 }
-
-void __init idr_init_cache(void);
-
 #endif /* __IDR_H__ */
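The rewritten header keeps the existing IDR entry points while routing them onto the radix tree, so existing callers compile unchanged. As a reference point, a small usage sketch against the declarations above (cache, cache_lock and struct my_obj are hypothetical names, not part of this patch):

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	struct my_obj {
		int data;
	};

	static DEFINE_IDR(cache);		/* IDR_INIT now expands to RADIX_TREE_INIT() */
	static DEFINE_SPINLOCK(cache_lock);

	static int cache_store(struct my_obj *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* preallocate tree nodes outside the lock */
		spin_lock(&cache_lock);
		/* ids start at 1; end <= 0 means "no upper limit" */
		id = idr_alloc(&cache, obj, 1, 0, GFP_NOWAIT);
		spin_unlock(&cache_lock);
		idr_preload_end();

		return id;			/* new id on success, negative errno on failure */
	}

	static struct my_obj *cache_fetch(int id)
	{
		return idr_find(&cache, id);	/* now a radix_tree_lookup() underneath */
	}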
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 52bda854593b..3e5735064b71 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -22,11 +22,13 @@
 #define _LINUX_RADIX_TREE_H
 
 #include <linux/bitops.h>
-#include <linux/preempt.h>
-#include <linux/types.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/preempt.h>
 #include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
 
 /*
  * The bottom two bits of the slot determine how the remaining bits in the
@@ -94,7 +96,7 @@ struct radix_tree_node {
 	unsigned char	count;		/* Total entry count */
 	unsigned char	exceptional;	/* Exceptional entry count */
 	struct radix_tree_node *parent;		/* Used when ascending tree */
-	void *private_data;			/* For tree user */
+	struct radix_tree_root *root;		/* The tree we belong to */
 	union {
 		struct list_head private_list;	/* For tree user */
 		struct rcu_head	rcu_head;	/* Used when freeing node */
@@ -103,7 +105,10 @@ struct radix_tree_node {
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
-/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
+/* The top bits of gfp_mask are used to store the root tags and the IDR flag */
+#define ROOT_IS_IDR	((__force gfp_t)(1 << __GFP_BITS_SHIFT))
+#define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT + 1)
+
 struct radix_tree_root {
 	gfp_t			gfp_mask;
 	struct radix_tree_node	__rcu *rnode;
@@ -123,7 +128,7 @@ do { \
 	(root)->rnode = NULL;						\
 } while (0)
 
-static inline bool radix_tree_empty(struct radix_tree_root *root)
+static inline bool radix_tree_empty(const struct radix_tree_root *root)
 {
 	return root->rnode == NULL;
 }
@@ -216,10 +221,8 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
  */
 
 /**
  * radix_tree_deref_slot - dereference a slot
- * @pslot: pointer to slot, returned by radix_tree_lookup_slot
- * Returns: item that was stored in that slot with any direct pointer flag
- * removed.
+ * @slot: slot pointer, returned by radix_tree_lookup_slot
  *
  * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
  * locked across slot lookup and dereference. Not required if write lock is
@@ -227,26 +230,27 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
  *
  * radix_tree_deref_retry must be used to confirm validity of the pointer if
  * only the read lock is held.
+ *
+ * Return: entry stored in that slot.
  */
-static inline void *radix_tree_deref_slot(void **pslot)
+static inline void *radix_tree_deref_slot(void __rcu **slot)
 {
-	return rcu_dereference(*pslot);
+	return rcu_dereference(*slot);
 }
 
 /**
- * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held
- * @pslot: pointer to slot, returned by radix_tree_lookup_slot
- * Returns: item that was stored in that slot with any direct pointer flag
- * removed.
- *
- * Similar to radix_tree_deref_slot but only used during migration when a pages
- * mapping is being moved. The caller does not hold the RCU read lock but it
- * must hold the tree lock to prevent parallel updates.
+ * radix_tree_deref_slot_protected - dereference a slot with tree lock held
+ * @slot: slot pointer, returned by radix_tree_lookup_slot
+ *
+ * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
+ * lock but it must hold the tree lock to prevent parallel updates.
+ *
+ * Return: entry stored in that slot.
  */
-static inline void *radix_tree_deref_slot_protected(void **pslot,
+static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
 					spinlock_t *treelock)
 {
-	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
+	return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
 }
 
 /**
@@ -282,9 +286,9 @@ static inline int radix_tree_exception(void *arg)
 	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
 }
 
-int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
+int __radix_tree_create(struct radix_tree_root *, unsigned long index,
 			unsigned order, struct radix_tree_node **nodep,
-			void ***slotp);
+			void __rcu ***slotp);
 int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
 			unsigned order, void *);
 static inline int radix_tree_insert(struct radix_tree_root *root,
@@ -292,55 +296,56 @@ static inline int radix_tree_insert(struct radix_tree_root *root,
 {
 	return __radix_tree_insert(root, index, 0, entry);
 }
-void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
-			  struct radix_tree_node **nodep, void ***slotp);
-void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
-void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
+void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
+			  struct radix_tree_node **nodep, void __rcu ***slotp);
+void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
+void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
+					unsigned long index);
 typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *);
-void __radix_tree_replace(struct radix_tree_root *root,
-			  struct radix_tree_node *node,
-			  void **slot, void *item,
+void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
+			  void __rcu **slot, void *entry,
 			  radix_tree_update_node_t update_node, void *private);
 void radix_tree_iter_replace(struct radix_tree_root *,
-		const struct radix_tree_iter *, void **slot, void *item);
-void radix_tree_replace_slot(struct radix_tree_root *root,
-			     void **slot, void *item);
-void __radix_tree_delete_node(struct radix_tree_root *root,
-			      struct radix_tree_node *node,
+		const struct radix_tree_iter *, void __rcu **slot, void *entry);
+void radix_tree_replace_slot(struct radix_tree_root *,
+			     void __rcu **slot, void *entry);
+void __radix_tree_delete_node(struct radix_tree_root *,
+			      struct radix_tree_node *,
 			      radix_tree_update_node_t update_node,
 			      void *private);
+void radix_tree_iter_delete(struct radix_tree_root *,
+			struct radix_tree_iter *iter, void __rcu **slot);
 void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
 void *radix_tree_delete(struct radix_tree_root *, unsigned long);
-void radix_tree_clear_tags(struct radix_tree_root *root,
-			   struct radix_tree_node *node,
-			   void **slot);
-unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
+void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
+			   void __rcu **slot);
+unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
 			void **results, unsigned long first_index,
 			unsigned int max_items);
-unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
-			void ***results, unsigned long *indices,
+unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
+			void __rcu ***results, unsigned long *indices,
 			unsigned long first_index, unsigned int max_items);
 int radix_tree_preload(gfp_t gfp_mask);
 int radix_tree_maybe_preload(gfp_t gfp_mask);
 int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
 void radix_tree_init(void);
-void *radix_tree_tag_set(struct radix_tree_root *root,
+void *radix_tree_tag_set(struct radix_tree_root *,
 			unsigned long index, unsigned int tag);
-void *radix_tree_tag_clear(struct radix_tree_root *root,
+void *radix_tree_tag_clear(struct radix_tree_root *,
 			unsigned long index, unsigned int tag);
-int radix_tree_tag_get(struct radix_tree_root *root,
+int radix_tree_tag_get(const struct radix_tree_root *,
 			unsigned long index, unsigned int tag);
-void radix_tree_iter_tag_set(struct radix_tree_root *root,
+void radix_tree_iter_tag_set(struct radix_tree_root *,
+			const struct radix_tree_iter *iter, unsigned int tag);
+void radix_tree_iter_tag_clear(struct radix_tree_root *,
 			const struct radix_tree_iter *iter, unsigned int tag);
-unsigned int
-radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
-		unsigned long first_index, unsigned int max_items,
-		unsigned int tag);
-unsigned int
-radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
-		unsigned long first_index, unsigned int max_items,
-		unsigned int tag);
-int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
+unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
+			void **results, unsigned long first_index,
+			unsigned int max_items, unsigned int tag);
+unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
+			void __rcu ***results, unsigned long first_index,
+			unsigned int max_items, unsigned int tag);
+int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
 
 static inline void radix_tree_preload_end(void)
 {
@@ -352,10 +357,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
 			unsigned new_order);
 int radix_tree_join(struct radix_tree_root *, unsigned long index,
 			unsigned new_order, void *);
+void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *,
+			gfp_t, int end);
 
-#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
-#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
-#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */
+enum {
+	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
+	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
+	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
+};
 
 /**
  * radix_tree_iter_init - initialize radix tree iterator
@@ -364,7 +373,7 @@ int radix_tree_join(struct radix_tree_root *, unsigned long index,
  * @start:	iteration starting index
  * Returns:	NULL
  */
-static __always_inline void **
+static __always_inline void __rcu **
 radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
 {
 	/*
@@ -393,10 +402,46 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
  * Also it fills @iter with data about chunk: position in the tree (index),
  * its end (next_index), and constructs a bit mask for tagged iterating (tags).
  */
-void **radix_tree_next_chunk(struct radix_tree_root *root,
+void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
 			     struct radix_tree_iter *iter, unsigned flags);
 
 /**
+ * radix_tree_iter_lookup - look up an index in the radix tree
+ * @root: radix tree root
+ * @iter: iterator state
+ * @index: key to look up
+ *
+ * If @index is present in the radix tree, this function returns the slot
+ * containing it and updates @iter to describe the entry.  If @index is not
+ * present, it returns NULL.
+ */
+static inline void __rcu **
+radix_tree_iter_lookup(const struct radix_tree_root *root,
+			struct radix_tree_iter *iter, unsigned long index)
+{
+	radix_tree_iter_init(iter, index);
+	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
+}
+
+/**
+ * radix_tree_iter_find - find a present entry
+ * @root: radix tree root
+ * @iter: iterator state
+ * @index: start location
+ *
+ * This function returns the slot containing the entry with the lowest index
+ * which is at least @index.  If @index is larger than any present entry, this
+ * function returns NULL.  The @iter is updated to describe the entry found.
+ */
+static inline void __rcu **
+radix_tree_iter_find(const struct radix_tree_root *root,
+			struct radix_tree_iter *iter, unsigned long index)
+{
+	radix_tree_iter_init(iter, index);
+	return radix_tree_next_chunk(root, iter, 0);
+}
+
+/**
  * radix_tree_iter_retry - retry this chunk of the iteration
  * @iter: iterator state
  *
@@ -406,7 +451,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
  * and continue the iteration.
  */
 static inline __must_check
-void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
 {
 	iter->next_index = iter->index;
 	iter->tags = 0;
@@ -429,7 +474,7 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
  * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
  */
-void **__must_check radix_tree_iter_resume(void **slot,
+void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
 					struct radix_tree_iter *iter);
 
 /**
@@ -445,11 +490,11 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
 }
 
 #ifdef CONFIG_RADIX_TREE_MULTIORDER
-void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
-				unsigned flags);
+void __rcu **__radix_tree_next_slot(void __rcu **slot,
+				struct radix_tree_iter *iter, unsigned flags);
 #else
 /* Can't happen without sibling entries, but the compiler can't tell that */
-static inline void ** __radix_tree_next_slot(void **slot,
+static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
 				struct radix_tree_iter *iter, unsigned flags)
 {
 	return slot;
@@ -475,8 +520,8 @@ static inline void ** __radix_tree_next_slot(void **slot,
  * b) we are doing non-tagged iteration, and iter->index and iter->next_index
  *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
  */
-static __always_inline void **
-radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
+static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
+				struct radix_tree_iter *iter, unsigned flags)
 {
 	if (flags & RADIX_TREE_ITER_TAGGED) {
 		iter->tags >>= 1;
@@ -514,7 +559,7 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
 	return NULL;
 
  found:
-	if (unlikely(radix_tree_is_internal_node(*slot)))
+	if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
 		return __radix_tree_next_slot(slot, iter, flags);
 	return slot;
 }
diff --git a/init/main.c b/init/main.c
index 47ea22d181ef..ae9f2008fb86 100644
--- a/init/main.c
+++ b/init/main.c
@@ -554,7 +554,7 @@ asmlinkage __visible void __init start_kernel(void)
 	if (WARN(!irqs_disabled(),
 		 "Interrupts were enabled *very* early, fixing it\n"))
 		local_irq_disable();
-	idr_init_cache();
+	radix_tree_init();
 
 	/*
 	 * Allow workqueue creation and work item queueing/cancelling
@@ -569,7 +569,6 @@ asmlinkage __visible void __init start_kernel(void)
 	trace_init();
 
 	context_tracking_init();
-	radix_tree_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
diff --git a/lib/Makefile b/lib/Makefile
index 469b2392893a..320ac46a8725 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,6 +25,9 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 earlycpio.o seq_buf.o siphash.o \
 	 nmi_backtrace.o nodemask.o win_minmax.o
 
+CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER
+CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER
+
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
diff --git a/lib/idr.c b/lib/idr.c
index 52d2979a05e8..b13682bb0a1c 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -1,1068 +1,409 @@
1/* 1#include <linux/bitmap.h>
2 * 2002-10-18 written by Jim Houston jim.houston@ccur.com
3 * Copyright (C) 2002 by Concurrent Computer Corporation
4 * Distributed under the GNU GPL license version 2.
5 *
6 * Modified by George Anzinger to reuse immediately and to use
7 * find bit instructions. Also removed _irq on spinlocks.
8 *
9 * Modified by Nadia Derbey to make it RCU safe.
10 *
11 * Small id to pointer translation service.
12 *
13 * It uses a radix tree like structure as a sparse array indexed
14 * by the id to obtain the pointer. The bitmap makes allocating
15 * a new id quick.
16 *
17 * You call it to allocate an id (an int) an associate with that id a
18 * pointer or what ever, we treat it as a (void *). You can pass this
19 * id to a user for him to pass back at a later time. You then pass
20 * that id to this code and it returns your pointer.
21 */
22
23#ifndef TEST // to test in user space...
24#include <linux/slab.h>
25#include <linux/init.h>
26#include <linux/export.h> 2#include <linux/export.h>
27#endif
28#include <linux/err.h>
29#include <linux/string.h>
30#include <linux/idr.h> 3#include <linux/idr.h>
4#include <linux/slab.h>
31#include <linux/spinlock.h> 5#include <linux/spinlock.h>
32#include <linux/percpu.h>
33
34#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
35#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
36
37/* Leave the possibility of an incomplete final layer */
38#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
39 6
40/* Number of id_layer structs to leave in free list */ 7DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
41#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
42
43static struct kmem_cache *idr_layer_cache;
44static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
45static DEFINE_PER_CPU(int, idr_preload_cnt);
46static DEFINE_SPINLOCK(simple_ida_lock); 8static DEFINE_SPINLOCK(simple_ida_lock);
47 9
48/* the maximum ID which can be allocated given idr->layers */
49static int idr_max(int layers)
50{
51 int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
52
53 return (1 << bits) - 1;
54}
55
56/*
57 * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
58 * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
59 * so on.
60 */
61static int idr_layer_prefix_mask(int layer)
62{
63 return ~idr_max(layer + 1);
64}
65
66static struct idr_layer *get_from_free_list(struct idr *idp)
67{
68 struct idr_layer *p;
69 unsigned long flags;
70
71 spin_lock_irqsave(&idp->lock, flags);
72 if ((p = idp->id_free)) {
73 idp->id_free = p->ary[0];
74 idp->id_free_cnt--;
75 p->ary[0] = NULL;
76 }
77 spin_unlock_irqrestore(&idp->lock, flags);
78 return(p);
79}
80
81/** 10/**
82 * idr_layer_alloc - allocate a new idr_layer 11 * idr_alloc - allocate an id
83 * @gfp_mask: allocation mask 12 * @idr: idr handle
84 * @layer_idr: optional idr to allocate from
85 *
86 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
87 * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
88 * an idr_layer from @idr->id_free.
89 *
90 * @layer_idr is to maintain backward compatibility with the old alloc
91 * interface - idr_pre_get() and idr_get_new*() - and will be removed
92 * together with per-pool preload buffer.
93 */
94static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
95{
96 struct idr_layer *new;
97
98 /* this is the old path, bypass to get_from_free_list() */
99 if (layer_idr)
100 return get_from_free_list(layer_idr);
101
102 /*
103 * Try to allocate directly from kmem_cache. We want to try this
104 * before preload buffer; otherwise, non-preloading idr_alloc()
105 * users will end up taking advantage of preloading ones. As the
106 * following is allowed to fail for preloaded cases, suppress
107 * warning this time.
108 */
109 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
110 if (new)
111 return new;
112
113 /*
114 * Try to fetch one from the per-cpu preload buffer if in process
115 * context. See idr_preload() for details.
116 */
117 if (!in_interrupt()) {
118 preempt_disable();
119 new = __this_cpu_read(idr_preload_head);
120 if (new) {
121 __this_cpu_write(idr_preload_head, new->ary[0]);
122 __this_cpu_dec(idr_preload_cnt);
123 new->ary[0] = NULL;
124 }
125 preempt_enable();
126 if (new)
127 return new;
128 }
129
130 /*
131 * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
132 * that memory allocation failure warning is printed as intended.
133 */
134 return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
135}
136
137static void idr_layer_rcu_free(struct rcu_head *head)
138{
139 struct idr_layer *layer;
140
141 layer = container_of(head, struct idr_layer, rcu_head);
142 kmem_cache_free(idr_layer_cache, layer);
143}
144
145static inline void free_layer(struct idr *idr, struct idr_layer *p)
146{
147 if (idr->hint == p)
148 RCU_INIT_POINTER(idr->hint, NULL);
149 call_rcu(&p->rcu_head, idr_layer_rcu_free);
150}
151
152/* only called when idp->lock is held */
153static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
154{
155 p->ary[0] = idp->id_free;
156 idp->id_free = p;
157 idp->id_free_cnt++;
158}
159
160static void move_to_free_list(struct idr *idp, struct idr_layer *p)
161{
162 unsigned long flags;
163
164 /*
165 * Depends on the return element being zeroed.
166 */
167 spin_lock_irqsave(&idp->lock, flags);
168 __move_to_free_list(idp, p);
169 spin_unlock_irqrestore(&idp->lock, flags);
170}
171
172static void idr_mark_full(struct idr_layer **pa, int id)
173{
174 struct idr_layer *p = pa[0];
175 int l = 0;
176
177 __set_bit(id & IDR_MASK, p->bitmap);
178 /*
179 * If this layer is full mark the bit in the layer above to
180 * show that this part of the radix tree is full. This may
181 * complete the layer above and require walking up the radix
182 * tree.
183 */
184 while (bitmap_full(p->bitmap, IDR_SIZE)) {
185 if (!(p = pa[++l]))
186 break;
187 id = id >> IDR_BITS;
188 __set_bit((id & IDR_MASK), p->bitmap);
189 }
190}
191
192static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
193{
194 while (idp->id_free_cnt < MAX_IDR_FREE) {
195 struct idr_layer *new;
196 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
197 if (new == NULL)
198 return (0);
199 move_to_free_list(idp, new);
200 }
201 return 1;
202}
203
204/**
205 * sub_alloc - try to allocate an id without growing the tree depth
206 * @idp: idr handle
207 * @starting_id: id to start search at
208 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
209 * @gfp_mask: allocation mask for idr_layer_alloc()
210 * @layer_idr: optional idr passed to idr_layer_alloc()
211 *
212 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
213 * growing its depth. Returns
214 *
215 * the allocated id >= 0 if successful,
216 * -EAGAIN if the tree needs to grow for allocation to succeed,
217 * -ENOSPC if the id space is exhausted,
218 * -ENOMEM if more idr_layers need to be allocated.
219 */
220static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
221 gfp_t gfp_mask, struct idr *layer_idr)
222{
223 int n, m, sh;
224 struct idr_layer *p, *new;
225 int l, id, oid;
226
227 id = *starting_id;
228 restart:
229 p = idp->top;
230 l = idp->layers;
231 pa[l--] = NULL;
232 while (1) {
233 /*
234 * We run around this while until we reach the leaf node...
235 */
236 n = (id >> (IDR_BITS*l)) & IDR_MASK;
237 m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
238 if (m == IDR_SIZE) {
239 /* no space available go back to previous layer. */
240 l++;
241 oid = id;
242 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
243
244 /* if already at the top layer, we need to grow */
245 if (id > idr_max(idp->layers)) {
246 *starting_id = id;
247 return -EAGAIN;
248 }
249 p = pa[l];
250 BUG_ON(!p);
251
252 /* If we need to go up one layer, continue the
253 * loop; otherwise, restart from the top.
254 */
255 sh = IDR_BITS * (l + 1);
256 if (oid >> sh == id >> sh)
257 continue;
258 else
259 goto restart;
260 }
261 if (m != n) {
262 sh = IDR_BITS*l;
263 id = ((id >> sh) ^ n ^ m) << sh;
264 }
265 if ((id >= MAX_IDR_BIT) || (id < 0))
266 return -ENOSPC;
267 if (l == 0)
268 break;
269 /*
270 * Create the layer below if it is missing.
271 */
272 if (!p->ary[m]) {
273 new = idr_layer_alloc(gfp_mask, layer_idr);
274 if (!new)
275 return -ENOMEM;
276 new->layer = l-1;
277 new->prefix = id & idr_layer_prefix_mask(new->layer);
278 rcu_assign_pointer(p->ary[m], new);
279 p->count++;
280 }
281 pa[l--] = p;
282 p = p->ary[m];
283 }
284
285 pa[l] = p;
286 return id;
287}
288
289static int idr_get_empty_slot(struct idr *idp, int starting_id,
290 struct idr_layer **pa, gfp_t gfp_mask,
291 struct idr *layer_idr)
292{
293 struct idr_layer *p, *new;
294 int layers, v, id;
295 unsigned long flags;
296
297 id = starting_id;
298build_up:
299 p = idp->top;
300 layers = idp->layers;
301 if (unlikely(!p)) {
302 if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
303 return -ENOMEM;
304 p->layer = 0;
305 layers = 1;
306 }
307 /*
308 * Add a new layer to the top of the tree if the requested
309 * id is larger than the currently allocated space.
310 */
311 while (id > idr_max(layers)) {
312 layers++;
313 if (!p->count) {
314 /* special case: if the tree is currently empty,
315 * then we grow the tree by moving the top node
316 * upwards.
317 */
318 p->layer++;
319 WARN_ON_ONCE(p->prefix);
320 continue;
321 }
322 if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
323 /*
324 * The allocation failed. If we built part of
325 * the structure tear it down.
326 */
327 spin_lock_irqsave(&idp->lock, flags);
328 for (new = p; p && p != idp->top; new = p) {
329 p = p->ary[0];
330 new->ary[0] = NULL;
331 new->count = 0;
332 bitmap_clear(new->bitmap, 0, IDR_SIZE);
333 __move_to_free_list(idp, new);
334 }
335 spin_unlock_irqrestore(&idp->lock, flags);
336 return -ENOMEM;
337 }
338 new->ary[0] = p;
339 new->count = 1;
340 new->layer = layers-1;
341 new->prefix = id & idr_layer_prefix_mask(new->layer);
342 if (bitmap_full(p->bitmap, IDR_SIZE))
343 __set_bit(0, new->bitmap);
344 p = new;
345 }
346 rcu_assign_pointer(idp->top, p);
347 idp->layers = layers;
348 v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
349 if (v == -EAGAIN)
350 goto build_up;
351 return(v);
352}
353
354/*
355 * @id and @pa are from a successful allocation from idr_get_empty_slot().
356 * Install the user pointer @ptr and mark the slot full.
357 */
358static void idr_fill_slot(struct idr *idr, void *ptr, int id,
359 struct idr_layer **pa)
360{
361 /* update hint used for lookup, cleared from free_layer() */
362 rcu_assign_pointer(idr->hint, pa[0]);
363
364 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
365 pa[0]->count++;
366 idr_mark_full(pa, id);
367}
368
369
370/**
371 * idr_preload - preload for idr_alloc()
372 * @gfp_mask: allocation mask to use for preloading
373 *
374 * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
375 * process context and each idr_preload() invocation should be matched with
376 * idr_preload_end(). Note that preemption is disabled while preloaded.
377 *
378 * The first idr_alloc() in the preloaded section can be treated as if it
379 * were invoked with @gfp_mask used for preloading. This allows using more
380 * permissive allocation masks for idrs protected by spinlocks.
381 *
382 * For example, if idr_alloc() below fails, the failure can be treated as
383 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
384 *
385 * idr_preload(GFP_KERNEL);
386 * spin_lock(lock);
387 *
388 * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
389 *
390 * spin_unlock(lock);
391 * idr_preload_end();
392 * if (id < 0)
393 * error;
394 */
395void idr_preload(gfp_t gfp_mask)
396{
397 /*
398 * Consuming preload buffer from non-process context breaks preload
399 * allocation guarantee. Disallow usage from those contexts.
400 */
401 WARN_ON_ONCE(in_interrupt());
402 might_sleep_if(gfpflags_allow_blocking(gfp_mask));
403
404 preempt_disable();
405
406 /*
407 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
408 * return value from idr_alloc() needs to be checked for failure
409 * anyway. Silently give up if allocation fails. The caller can
410 * treat failures from idr_alloc() as if idr_alloc() were called
411 * with @gfp_mask which should be enough.
412 */
413 while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
414 struct idr_layer *new;
415
416 preempt_enable();
417 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
418 preempt_disable();
419 if (!new)
420 break;
421
422 /* link the new one to per-cpu preload list */
423 new->ary[0] = __this_cpu_read(idr_preload_head);
424 __this_cpu_write(idr_preload_head, new);
425 __this_cpu_inc(idr_preload_cnt);
426 }
427}
428EXPORT_SYMBOL(idr_preload);
429
430/**
431 * idr_alloc - allocate new idr entry
432 * @idr: the (initialized) idr
433 * @ptr: pointer to be associated with the new id 13 * @ptr: pointer to be associated with the new id
434 * @start: the minimum id (inclusive) 14 * @start: the minimum id (inclusive)
435 * @end: the maximum id (exclusive, <= 0 for max) 15 * @end: the maximum id (exclusive)
436 * @gfp_mask: memory allocation flags 16 * @gfp: memory allocation flags
437 * 17 *
438 * Allocate an id in [start, end) and associate it with @ptr. If no ID is 18 * Allocates an unused ID in the range [start, end). Returns -ENOSPC
439 * available in the specified range, returns -ENOSPC. On memory allocation 19 * if there are no unused IDs in that range.
440 * failure, returns -ENOMEM.
441 * 20 *
442 * Note that @end is treated as max when <= 0. This is to always allow 21 * Note that @end is treated as max when <= 0. This is to always allow
443 * using @start + N as @end as long as N is inside integer range. 22 * using @start + N as @end as long as N is inside integer range.
444 * 23 *
445 * The user is responsible for exclusively synchronizing all operations 24 * Simultaneous modifications to the @idr are not allowed and should be
446 * which may modify @idr. However, read-only accesses such as idr_find() 25 * prevented by the user, usually with a lock. idr_alloc() may be called
447 * or iteration can be performed under RCU read lock provided the user 26 * concurrently with read-only accesses to the @idr, such as idr_find() and
448 * destroys @ptr in RCU-safe way after removal from idr. 27 * idr_for_each_entry().
449 */ 28 */
450int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) 29int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
451{ 30{
452 int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ 31 void __rcu **slot;
453 struct idr_layer *pa[MAX_IDR_LEVEL + 1]; 32 struct radix_tree_iter iter;
454 int id;
455 33
456 might_sleep_if(gfpflags_allow_blocking(gfp_mask));
457
458 /* sanity checks */
459 if (WARN_ON_ONCE(start < 0)) 34 if (WARN_ON_ONCE(start < 0))
460 return -EINVAL; 35 return -EINVAL;
461 if (unlikely(max < start)) 36 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
462 return -ENOSPC; 37 return -EINVAL;
463 38
464 /* allocate id */ 39 radix_tree_iter_init(&iter, start);
465 id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); 40 slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
466 if (unlikely(id < 0)) 41 if (IS_ERR(slot))
467 return id; 42 return PTR_ERR(slot);
468 if (unlikely(id > max))
469 return -ENOSPC;
470 43
471 idr_fill_slot(idr, ptr, id, pa); 44 radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
472 return id; 45 radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
46 return iter.index;
473} 47}
474EXPORT_SYMBOL_GPL(idr_alloc); 48EXPORT_SYMBOL_GPL(idr_alloc);
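For illustration, a minimal sketch of the allocation pattern the kernel-doc above describes, combining idr_preload() with a GFP_NOWAIT idr_alloc() under a spinlock. The my_idr / my_lock pair and my_obj_register() are hypothetical names, not part of this patch:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);		/* hypothetical IDR instance */
static DEFINE_SPINLOCK(my_lock);	/* serialises all modifications of my_idr */

/* Allocate an ID in [1, 100) for @obj; returns the new ID or a negative errno. */
static int my_obj_register(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, obj, 1, 100, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	return id;
}

Read-only accesses such as idr_find() could then run under rcu_read_lock() without taking my_lock, as the kernel-doc notes.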
475 49
476/** 50/**
477 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion 51 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
478 * @idr: the (initialized) idr 52 * @idr: idr handle
479 * @ptr: pointer to be associated with the new id 53 * @ptr: pointer to be associated with the new id
480 * @start: the minimum id (inclusive) 54 * @start: the minimum id (inclusive)
481 * @end: the maximum id (exclusive, <= 0 for max) 55 * @end: the maximum id (exclusive)
482 * @gfp_mask: memory allocation flags 56 * @gfp: memory allocation flags
483 *
484 * Essentially the same as idr_alloc, but prefers to allocate progressively
485 * higher ids if it can. If the "cur" counter wraps, then it will start again
486 * at the "start" end of the range and allocate one that has already been used.
487 */
488int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
489 gfp_t gfp_mask)
490{
491 int id;
492
493 id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
494 if (id == -ENOSPC)
495 id = idr_alloc(idr, ptr, start, end, gfp_mask);
496
497 if (likely(id >= 0))
498 idr->cur = id + 1;
499 return id;
500}
501EXPORT_SYMBOL(idr_alloc_cyclic);
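A small hypothetical sequence showing the cyclic behaviour described above. my_idr and the ptrN pointers are placeholders, and the expected IDs assume the IDR starts empty:

	int a, b, c;

	a = idr_alloc_cyclic(&my_idr, ptr0, 0, 0, GFP_KERNEL);	/* expect 0 */
	b = idr_alloc_cyclic(&my_idr, ptr1, 0, 0, GFP_KERNEL);	/* expect 1 */
	idr_remove(&my_idr, b);
	c = idr_alloc_cyclic(&my_idr, ptr2, 0, 0, GFP_KERNEL);	/* expect 2: the cursor
								   keeps advancing rather
								   than reusing the freed 1 */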
502
503static void idr_remove_warning(int id)
504{
505 WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
506}
507
508static void sub_remove(struct idr *idp, int shift, int id)
509{
510 struct idr_layer *p = idp->top;
511 struct idr_layer **pa[MAX_IDR_LEVEL + 1];
512 struct idr_layer ***paa = &pa[0];
513 struct idr_layer *to_free;
514 int n;
515
516 *paa = NULL;
517 *++paa = &idp->top;
518
519 while ((shift > 0) && p) {
520 n = (id >> shift) & IDR_MASK;
521 __clear_bit(n, p->bitmap);
522 *++paa = &p->ary[n];
523 p = p->ary[n];
524 shift -= IDR_BITS;
525 }
526 n = id & IDR_MASK;
527 if (likely(p != NULL && test_bit(n, p->bitmap))) {
528 __clear_bit(n, p->bitmap);
529 RCU_INIT_POINTER(p->ary[n], NULL);
530 to_free = NULL;
531 while(*paa && ! --((**paa)->count)){
532 if (to_free)
533 free_layer(idp, to_free);
534 to_free = **paa;
535 **paa-- = NULL;
536 }
537 if (!*paa)
538 idp->layers = 0;
539 if (to_free)
540 free_layer(idp, to_free);
541 } else
542 idr_remove_warning(id);
543}
544
545/**
546 * idr_remove - remove the given id and free its slot
547 * @idp: idr handle
548 * @id: unique key
549 */
550void idr_remove(struct idr *idp, int id)
551{
552 struct idr_layer *p;
553 struct idr_layer *to_free;
554
555 if (id < 0)
556 return;
557
558 if (id > idr_max(idp->layers)) {
559 idr_remove_warning(id);
560 return;
561 }
562
563 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
564 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
565 idp->top->ary[0]) {
566 /*
567 * Single child at leftmost slot: we can shrink the tree.
568 * This level is not needed anymore since when layers are
569 * inserted, they are inserted at the top of the existing
570 * tree.
571 */
572 to_free = idp->top;
573 p = idp->top->ary[0];
574 rcu_assign_pointer(idp->top, p);
575 --idp->layers;
576 to_free->count = 0;
577 bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
578 free_layer(idp, to_free);
579 }
580}
581EXPORT_SYMBOL(idr_remove);
582
583static void __idr_remove_all(struct idr *idp)
584{
585 int n, id, max;
586 int bt_mask;
587 struct idr_layer *p;
588 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
589 struct idr_layer **paa = &pa[0];
590
591 n = idp->layers * IDR_BITS;
592 *paa = idp->top;
593 RCU_INIT_POINTER(idp->top, NULL);
594 max = idr_max(idp->layers);
595
596 id = 0;
597 while (id >= 0 && id <= max) {
598 p = *paa;
599 while (n > IDR_BITS && p) {
600 n -= IDR_BITS;
601 p = p->ary[(id >> n) & IDR_MASK];
602 *++paa = p;
603 }
604
605 bt_mask = id;
606 id += 1 << n;
607 /* Get the highest bit that the above add changed from 0->1. */
608 while (n < fls(id ^ bt_mask)) {
609 if (*paa)
610 free_layer(idp, *paa);
611 n += IDR_BITS;
612 --paa;
613 }
614 }
615 idp->layers = 0;
616}
617
618/**
619 * idr_destroy - release all cached layers within an idr tree
620 * @idp: idr handle
621 *
622 * Free all id mappings and all idp_layers. After this function, @idp is
623 * completely unused and can be freed / recycled. The caller is
624 * responsible for ensuring that no one else accesses @idp during or after
625 * idr_destroy().
626 * 57 *
627 * A typical clean-up sequence for objects stored in an idr tree will use 58 * Allocates an ID larger than the last ID allocated if one is available.
628 * idr_for_each() to free all objects, if necessary, then idr_destroy() to 59 * If not, it will attempt to allocate the smallest ID that is larger or
629 * free up the id mappings and cached idr_layers. 60 * equal to @start.
630 */ 61 */
631void idr_destroy(struct idr *idp) 62int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
632{ 63{
633 __idr_remove_all(idp); 64 int id, curr = idr->idr_next;
634 65
635 while (idp->id_free_cnt) { 66 if (curr < start)
636 struct idr_layer *p = get_from_free_list(idp); 67 curr = start;
637 kmem_cache_free(idr_layer_cache, p);
638 }
639}
640EXPORT_SYMBOL(idr_destroy);
641 68
642void *idr_find_slowpath(struct idr *idp, int id) 69 id = idr_alloc(idr, ptr, curr, end, gfp);
643{ 70 if ((id == -ENOSPC) && (curr > start))
644 int n; 71 id = idr_alloc(idr, ptr, start, curr, gfp);
645 struct idr_layer *p;
646
647 if (id < 0)
648 return NULL;
649
650 p = rcu_dereference_raw(idp->top);
651 if (!p)
652 return NULL;
653 n = (p->layer+1) * IDR_BITS;
654 72
655 if (id > idr_max(p->layer + 1)) 73 if (id >= 0)
656 return NULL; 74 idr->idr_next = id + 1U;
657 BUG_ON(n == 0);
658 75
659 while (n > 0 && p) { 76 return id;
660 n -= IDR_BITS;
661 BUG_ON(n != p->layer*IDR_BITS);
662 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
663 }
664 return((void *)p);
665} 77}
666EXPORT_SYMBOL(idr_find_slowpath); 78EXPORT_SYMBOL(idr_alloc_cyclic);
667 79
668/** 80/**
669 * idr_for_each - iterate through all stored pointers 81 * idr_for_each - iterate through all stored pointers
670 * @idp: idr handle 82 * @idr: idr handle
671 * @fn: function to be called for each pointer 83 * @fn: function to be called for each pointer
672 * @data: data passed back to callback function 84 * @data: data passed to callback function
673 * 85 *
674 * Iterate over the pointers registered with the given idr. The 86 * The callback function will be called for each entry in @idr, passing
675 * callback function will be called for each pointer currently 87 * the id, the pointer and the data pointer passed to this function.
676 * registered, passing the id, the pointer and the data pointer passed
677 * to this function. It is not safe to modify the idr tree while in
678 * the callback, so functions such as idr_get_new and idr_remove are
679 * not allowed.
680 * 88 *
681 * We check the return of @fn each time. If it returns anything other 89 * If @fn returns anything other than %0, the iteration stops and that
682 * than %0, we break out and return that value. 90 * value is returned from this function.
683 * 91 *
684 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). 92 * idr_for_each() can be called concurrently with idr_alloc() and
93 * idr_remove() if protected by RCU. Newly added entries may not be
94 * seen and deleted entries may be seen, but adding and removing entries
95 * will not cause other entries to be skipped, nor spurious ones to be seen.
685 */ 96 */
686int idr_for_each(struct idr *idp, 97int idr_for_each(const struct idr *idr,
687 int (*fn)(int id, void *p, void *data), void *data) 98 int (*fn)(int id, void *p, void *data), void *data)
688{ 99{
689 int n, id, max, error = 0; 100 struct radix_tree_iter iter;
690 struct idr_layer *p; 101 void __rcu **slot;
691 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
692 struct idr_layer **paa = &pa[0];
693
694 n = idp->layers * IDR_BITS;
695 *paa = rcu_dereference_raw(idp->top);
696 max = idr_max(idp->layers);
697 102
698 id = 0; 103 radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
699 while (id >= 0 && id <= max) { 104 int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
700 p = *paa; 105 if (ret)
701 while (n > 0 && p) { 106 return ret;
702 n -= IDR_BITS;
703 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
704 *++paa = p;
705 }
706
707 if (p) {
708 error = fn(id, (void *)p, data);
709 if (error)
710 break;
711 }
712
713 id += 1 << n;
714 while (n < fls(id)) {
715 n += IDR_BITS;
716 --paa;
717 }
718 } 107 }
719 108
720 return error; 109 return 0;
721} 110}
722EXPORT_SYMBOL(idr_for_each); 111EXPORT_SYMBOL(idr_for_each);
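As a sketch of the clean-up sequence mentioned earlier (free every stored object, then tear the tree down), assuming the stored pointers came from kmalloc(); my_obj_free() and my_idr_cleanup() are illustrative names:

/* Hypothetical callback: @p is an object the caller allocated with kmalloc(). */
static int my_obj_free(int id, void *p, void *data)
{
	kfree(p);
	return 0;	/* returning non-zero here would stop the iteration early */
}

static void my_idr_cleanup(struct idr *idr)
{
	idr_for_each(idr, my_obj_free, NULL);
	idr_destroy(idr);
}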
723 112
724/** 113/**
725 * idr_get_next - lookup next object of id to given id. 114 * idr_get_next - Find next populated entry
726 * @idp: idr handle 115 * @idr: idr handle
727 * @nextidp: pointer to lookup key 116 * @nextid: Pointer to lowest possible ID to return
728 * 117 *
729 * Returns pointer to registered object with id, which is next number to 118 * Returns the next populated entry in the tree with an ID greater than
730 * given id. After being looked up, *@nextidp will be updated for the next 119 * or equal to the value pointed to by @nextid. On exit, @nextid is updated
731 * iteration. 120 * to the ID of the found value. To use in a loop, the value pointed to by
732 * 121 * nextid must be incremented by the user.
733 * This function can be called under rcu_read_lock(), given that the leaf
734 * pointers lifetimes are correctly managed.
735 */ 122 */
736void *idr_get_next(struct idr *idp, int *nextidp) 123void *idr_get_next(struct idr *idr, int *nextid)
737{ 124{
738 struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; 125 struct radix_tree_iter iter;
739 struct idr_layer **paa = &pa[0]; 126 void __rcu **slot;
740 int id = *nextidp;
741 int n, max;
742 127
743 /* find first ent */ 128 slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
744 p = *paa = rcu_dereference_raw(idp->top); 129 if (!slot)
745 if (!p)
746 return NULL; 130 return NULL;
747 n = (p->layer + 1) * IDR_BITS;
748 max = idr_max(p->layer + 1);
749
750 while (id >= 0 && id <= max) {
751 p = *paa;
752 while (n > 0 && p) {
753 n -= IDR_BITS;
754 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
755 *++paa = p;
756 }
757
758 if (p) {
759 *nextidp = id;
760 return p;
761 }
762 131
763 /* 132 *nextid = iter.index;
764 * Proceed to the next layer at the current level. Unlike 133 return rcu_dereference_raw(*slot);
765 * idr_for_each(), @id isn't guaranteed to be aligned to
766 * layer boundary at this point and adding 1 << n may
767 * incorrectly skip IDs. Make sure we jump to the
768 * beginning of the next layer using round_up().
769 */
770 id = round_up(id + 1, 1 << n);
771 while (n < fls(id)) {
772 n += IDR_BITS;
773 --paa;
774 }
775 }
776 return NULL;
777} 134}
778EXPORT_SYMBOL(idr_get_next); 135EXPORT_SYMBOL(idr_get_next);
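A minimal open-coded loop matching the contract above, where the caller advances *nextid past each returned entry; this is essentially what the idr_for_each_entry() helper wraps (my_idr is a placeholder):

	void *entry;
	int id = 0;

	while ((entry = idr_get_next(&my_idr, &id)) != NULL) {
		pr_info("id %d -> %p\n", id, entry);
		id++;		/* step past the entry just returned */
	}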
779 136
780
781/** 137/**
782 * idr_replace - replace pointer for given id 138 * idr_replace - replace pointer for given id
783 * @idp: idr handle 139 * @idr: idr handle
784 * @ptr: pointer you want associated with the id 140 * @ptr: New pointer to associate with the ID
785 * @id: lookup key 141 * @id: Lookup key
786 * 142 *
787 * Replace the pointer registered with an id and return the old value. 143 * Replace the pointer registered with an ID and return the old value.
788 * A %-ENOENT return indicates that @id was not found. 144 * This function can be called under the RCU read lock concurrently with
789 * A %-EINVAL return indicates that @id was not within valid constraints. 145 * idr_alloc() and idr_remove() (as long as the ID being removed is not
146 * the one being replaced!).
790 * 147 *
 791 * The caller must serialize with writers. 148 * Returns: the old value on success. %-ENOENT indicates that @id was not found.
149 * %-EINVAL indicates that @id or @ptr were not valid.
792 */ 150 */
793void *idr_replace(struct idr *idp, void *ptr, int id) 151void *idr_replace(struct idr *idr, void *ptr, int id)
794{ 152{
795 int n; 153 struct radix_tree_node *node;
796 struct idr_layer *p, *old_p; 154 void __rcu **slot = NULL;
155 void *entry;
797 156
798 if (id < 0) 157 if (WARN_ON_ONCE(id < 0))
158 return ERR_PTR(-EINVAL);
159 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
799 return ERR_PTR(-EINVAL); 160 return ERR_PTR(-EINVAL);
800 161
801 p = idp->top; 162 entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
802 if (!p) 163 if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
803 return ERR_PTR(-ENOENT);
804
805 if (id > idr_max(p->layer + 1))
806 return ERR_PTR(-ENOENT);
807
808 n = p->layer * IDR_BITS;
809 while ((n > 0) && p) {
810 p = p->ary[(id >> n) & IDR_MASK];
811 n -= IDR_BITS;
812 }
813
814 n = id & IDR_MASK;
815 if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
816 return ERR_PTR(-ENOENT); 164 return ERR_PTR(-ENOENT);
817 165
818 old_p = p->ary[n]; 166 __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL);
819 rcu_assign_pointer(p->ary[n], ptr);
820 167
821 return old_p; 168 return entry;
822} 169}
823EXPORT_SYMBOL(idr_replace); 170EXPORT_SYMBOL(idr_replace);
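A hedged sketch of swapping an entry and disposing of the old one. my_obj, new_obj and id are placeholders, and the deferred free assumes readers look the object up under RCU as the kernel-doc suggests:

	struct my_obj *old;

	old = idr_replace(&my_idr, new_obj, id);
	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT or -EINVAL */

	/* Readers may still hold @old; wait for them before freeing it. */
	synchronize_rcu();
	kfree(old);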
824 171
825void __init idr_init_cache(void)
826{
827 idr_layer_cache = kmem_cache_create("idr_layer_cache",
828 sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
829}
830
831/**
832 * idr_init - initialize idr handle
833 * @idp: idr handle
834 *
 835 * This function is used to set up the handle (@idp) that you will pass
836 * to the rest of the functions.
837 */
838void idr_init(struct idr *idp)
839{
840 memset(idp, 0, sizeof(struct idr));
841 spin_lock_init(&idp->lock);
842}
843EXPORT_SYMBOL(idr_init);
844
845static int idr_has_entry(int id, void *p, void *data)
846{
847 return 1;
848}
849
850bool idr_is_empty(struct idr *idp)
851{
852 return !idr_for_each(idp, idr_has_entry, NULL);
853}
854EXPORT_SYMBOL(idr_is_empty);
855
856/** 172/**
857 * DOC: IDA description 173 * DOC: IDA description
858 * IDA - IDR based ID allocator
859 * 174 *
860 * This is id allocator without id -> pointer translation. Memory 175 * The IDA is an ID allocator which does not provide the ability to
861 * usage is much lower than full blown idr because each id only 176 * associate an ID with a pointer. As such, it only needs to store one
862 * occupies a bit. ida uses a custom leaf node which contains 177 * bit per ID, and so is more space efficient than an IDR. To use an IDA,
863 * IDA_BITMAP_BITS slots. 178 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
864 * 179 * then initialise it using ida_init()). To allocate a new ID, call
865 * 2007-04-25 written by Tejun Heo <htejun@gmail.com> 180 * ida_simple_get(). To free an ID, call ida_simple_remove().
181 *
182 * If you have more complex locking requirements, use a loop around
183 * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
184 * ida_remove() to free an ID. You must make sure that ida_get_new() and
185 * ida_remove() cannot be called at the same time as each other for the
186 * same IDA.
187 *
188 * You can also use ida_get_new_above() if you need an ID to be allocated
189 * above a particular number. ida_destroy() can be used to dispose of an
190 * IDA without needing to free the individual IDs in it. You can use
191 * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
192 *
193 * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
194 * limitation, it should be quite straightforward to raise the maximum.
866 */ 195 */
867 196
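A minimal sketch of the simple-locking path described above, e.g. for handing out device minor numbers. my_ida, the [0, 256) range and the helper names are illustrative only:

#include <linux/idr.h>

static DEFINE_IDA(my_ida);	/* hypothetical IDA instance */

static int my_minor_alloc(void)
{
	/* Lowest free ID in [0, 256) or a negative errno; locking is handled internally. */
	return ida_simple_get(&my_ida, 0, 256, GFP_KERNEL);
}

static void my_minor_free(int minor)
{
	ida_simple_remove(&my_ida, minor);
}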
868static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) 197/*
869{ 198 * Developer's notes:
870 unsigned long flags; 199 *
871 200 * The IDA uses the functionality provided by the IDR & radix tree to store
872 if (!ida->free_bitmap) { 201 * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
873 spin_lock_irqsave(&ida->idr.lock, flags); 202 * free, unlike the IDR where it means at least one entry is free.
874 if (!ida->free_bitmap) { 203 *
875 ida->free_bitmap = bitmap; 204 * I considered telling the radix tree that each slot is an order-10 node
876 bitmap = NULL; 205 * and storing the bit numbers in the radix tree, but the radix tree can't
877 } 206 * allow a single multiorder entry at index 0, which would significantly
878 spin_unlock_irqrestore(&ida->idr.lock, flags); 207 * increase memory consumption for the IDA. So instead we divide the index
879 } 208 * by the number of bits in the leaf bitmap before doing a radix tree lookup.
880 209 *
881 kfree(bitmap); 210 * As an optimisation, if there are only a few low bits set in any given
882} 211 * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
883 212 * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
884/** 213 * directly in the entry. By being really tricksy, we could store
885 * ida_pre_get - reserve resources for ida allocation 214 * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising
886 * @ida: ida handle 215 * for 0-3 allocated IDs.
887 * @gfp_mask: memory allocation flag 216 *
888 * 217 * We allow the radix tree 'exceptional' count to get out of date. Nothing
889 * This function should be called prior to locking and calling the 218 * in the IDA nor the radix tree code checks it. If it becomes important
890 * following function. It preallocates enough memory to satisfy the 219 * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
891 * worst possible allocation. 220 * calls to radix_tree_iter_replace() which will correct the exceptional
892 * 221 * count.
893 * If the system is REALLY out of memory this function returns %0, 222 *
894 * otherwise %1. 223 * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
224 * equivalent, it will still need locking. Going to RCU lookup would require
225 * using RCU to free bitmaps, and that's not trivial without embedding an
226 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
227 * bitmap, which is excessive.
895 */ 228 */
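As a worked example of the exceptional-entry optimisation described in the notes above (assuming 64-bit longs, RADIX_TREE_EXCEPTIONAL_SHIFT == 2 and RADIX_TREE_EXCEPTIONAL_ENTRY == 2):

/*
 * IDs 0..61 fit directly in the slot, one bit per ID at position id + 2.
 * After allocating IDs 0 and 3 the slot would hold
 *
 *	(1UL << 2) | (1UL << 5) | RADIX_TREE_EXCEPTIONAL_ENTRY == 0x26
 *
 * Allocating ID 62 would no longer fit and would spill the whole set into
 * a real 128-byte struct ida_bitmap.
 */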
896int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
897{
898 /* allocate idr_layers */
899 if (!__idr_pre_get(&ida->idr, gfp_mask))
900 return 0;
901 229
902 /* allocate free_bitmap */ 230#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)
903 if (!ida->free_bitmap) {
904 struct ida_bitmap *bitmap;
905
906 bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
907 if (!bitmap)
908 return 0;
909
910 free_bitmap(ida, bitmap);
911 }
912
913 return 1;
914}
915EXPORT_SYMBOL(ida_pre_get);
916 231
917/** 232/**
918 * ida_get_new_above - allocate new ID above or equal to a start id 233 * ida_get_new_above - allocate new ID above or equal to a start id
919 * @ida: ida handle 234 * @ida: ida handle
920 * @starting_id: id to start search at 235 * @start: id to start search at
921 * @p_id: pointer to the allocated handle 236 * @id: pointer to the allocated handle
922 * 237 *
923 * Allocate new ID above or equal to @starting_id. It should be called 238 * Allocate new ID above or equal to @start. It should be called
924 * with any required locks. 239 * with any required locks to ensure that concurrent calls to
240 * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
241 * Consider using ida_simple_get() if you do not have complex locking
242 * requirements.
925 * 243 *
926 * If memory is required, it will return %-EAGAIN, you should unlock 244 * If memory is required, it will return %-EAGAIN, you should unlock
927 * and go back to the ida_pre_get() call. If the ida is full, it will 245 * and go back to the ida_pre_get() call. If the ida is full, it will
928 * return %-ENOSPC. 246 * return %-ENOSPC. On success, it will return 0.
929 *
930 * Note that callers must ensure that concurrent access to @ida is not possible.
 931 * See ida_simple_get() for a variant which takes care of locking.
932 * 247 *
933 * @p_id returns a value in the range @starting_id ... %0x7fffffff. 248 * @id returns a value in the range @start ... %0x7fffffff.
934 */ 249 */
935int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) 250int ida_get_new_above(struct ida *ida, int start, int *id)
936{ 251{
937 struct idr_layer *pa[MAX_IDR_LEVEL + 1]; 252 struct radix_tree_root *root = &ida->ida_rt;
253 void __rcu **slot;
254 struct radix_tree_iter iter;
938 struct ida_bitmap *bitmap; 255 struct ida_bitmap *bitmap;
939 unsigned long flags; 256 unsigned long index;
940 int idr_id = starting_id / IDA_BITMAP_BITS; 257 unsigned bit, ebit;
941 int offset = starting_id % IDA_BITMAP_BITS; 258 int new;
942 int t, id; 259
943 260 index = start / IDA_BITMAP_BITS;
944 restart: 261 bit = start % IDA_BITMAP_BITS;
945 /* get vacant slot */ 262 ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;
946 t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); 263
947 if (t < 0) 264 slot = radix_tree_iter_init(&iter, index);
948 return t == -ENOMEM ? -EAGAIN : t; 265 for (;;) {
949 266 if (slot)
950 if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) 267 slot = radix_tree_next_slot(slot, &iter,
951 return -ENOSPC; 268 RADIX_TREE_ITER_TAGGED);
952 269 if (!slot) {
953 if (t != idr_id) 270 slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
954 offset = 0; 271 if (IS_ERR(slot)) {
955 idr_id = t; 272 if (slot == ERR_PTR(-ENOMEM))
956 273 return -EAGAIN;
957 /* if bitmap isn't there, create a new one */ 274 return PTR_ERR(slot);
958 bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; 275 }
959 if (!bitmap) { 276 }
960 spin_lock_irqsave(&ida->idr.lock, flags); 277 if (iter.index > index) {
961 bitmap = ida->free_bitmap; 278 bit = 0;
962 ida->free_bitmap = NULL; 279 ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
963 spin_unlock_irqrestore(&ida->idr.lock, flags); 280 }
964 281 new = iter.index * IDA_BITMAP_BITS;
965 if (!bitmap) 282 bitmap = rcu_dereference_raw(*slot);
966 return -EAGAIN; 283 if (radix_tree_exception(bitmap)) {
967 284 unsigned long tmp = (unsigned long)bitmap;
968 memset(bitmap, 0, sizeof(struct ida_bitmap)); 285 ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
969 rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], 286 if (ebit < BITS_PER_LONG) {
970 (void *)bitmap); 287 tmp |= 1UL << ebit;
971 pa[0]->count++; 288 rcu_assign_pointer(*slot, (void *)tmp);
972 } 289 *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
973 290 return 0;
974 /* lookup for empty slot */ 291 }
975 t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); 292 bitmap = this_cpu_xchg(ida_bitmap, NULL);
976 if (t == IDA_BITMAP_BITS) { 293 if (!bitmap)
977 /* no empty slot after offset, continue to the next chunk */ 294 return -EAGAIN;
978 idr_id++; 295 memset(bitmap, 0, sizeof(*bitmap));
979 offset = 0; 296 bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
980 goto restart; 297 rcu_assign_pointer(*slot, bitmap);
981 } 298 }
982
983 id = idr_id * IDA_BITMAP_BITS + t;
984 if (id >= MAX_IDR_BIT)
985 return -ENOSPC;
986 299
987 __set_bit(t, bitmap->bitmap); 300 if (bitmap) {
988 if (++bitmap->nr_busy == IDA_BITMAP_BITS) 301 bit = find_next_zero_bit(bitmap->bitmap,
989 idr_mark_full(pa, idr_id); 302 IDA_BITMAP_BITS, bit);
303 new += bit;
304 if (new < 0)
305 return -ENOSPC;
306 if (bit == IDA_BITMAP_BITS)
307 continue;
990 308
991 *p_id = id; 309 __set_bit(bit, bitmap->bitmap);
310 if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
311 radix_tree_iter_tag_clear(root, &iter,
312 IDR_FREE);
313 } else {
314 new += bit;
315 if (new < 0)
316 return -ENOSPC;
317 if (ebit < BITS_PER_LONG) {
318 bitmap = (void *)((1UL << ebit) |
319 RADIX_TREE_EXCEPTIONAL_ENTRY);
320 radix_tree_iter_replace(root, &iter, slot,
321 bitmap);
322 *id = new;
323 return 0;
324 }
325 bitmap = this_cpu_xchg(ida_bitmap, NULL);
326 if (!bitmap)
327 return -EAGAIN;
328 memset(bitmap, 0, sizeof(*bitmap));
329 __set_bit(bit, bitmap->bitmap);
330 radix_tree_iter_replace(root, &iter, slot, bitmap);
331 }
992 332
993 /* Each leaf node can handle nearly a thousand slots and the 333 *id = new;
994 * whole idea of ida is to have small memory foot print. 334 return 0;
995 * Throw away extra resources one by one after each successful
996 * allocation.
997 */
998 if (ida->idr.id_free_cnt || ida->free_bitmap) {
999 struct idr_layer *p = get_from_free_list(&ida->idr);
1000 if (p)
1001 kmem_cache_free(idr_layer_cache, p);
1002 } 335 }
1003
1004 return 0;
1005} 336}
1006EXPORT_SYMBOL(ida_get_new_above); 337EXPORT_SYMBOL(ida_get_new_above);
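The classic retry loop the kernel-doc above alludes to, sketched with a hypothetical my_ida / my_lock pair:

	int id, ret;

again:
	if (!ida_pre_get(&my_ida, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&my_lock);
	ret = ida_get_new_above(&my_ida, 1, &id);
	spin_unlock(&my_lock);
	if (ret == -EAGAIN)
		goto again;		/* preallocated memory was consumed; refill and retry */
	if (ret)
		return ret;		/* -ENOSPC or another error */
	/* @id now holds the lowest free ID >= 1 */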
1007 338
1008/** 339/**
1009 * ida_remove - remove the given ID 340 * ida_remove - Free the given ID
1010 * @ida: ida handle 341 * @ida: ida handle
1011 * @id: ID to free 342 * @id: ID to free
343 *
344 * This function should not be called at the same time as ida_get_new_above().
1012 */ 345 */
1013void ida_remove(struct ida *ida, int id) 346void ida_remove(struct ida *ida, int id)
1014{ 347{
1015 struct idr_layer *p = ida->idr.top; 348 unsigned long index = id / IDA_BITMAP_BITS;
1016 int shift = (ida->idr.layers - 1) * IDR_BITS; 349 unsigned offset = id % IDA_BITMAP_BITS;
1017 int idr_id = id / IDA_BITMAP_BITS;
1018 int offset = id % IDA_BITMAP_BITS;
1019 int n;
1020 struct ida_bitmap *bitmap; 350 struct ida_bitmap *bitmap;
351 unsigned long *btmp;
352 struct radix_tree_iter iter;
353 void __rcu **slot;
1021 354
1022 if (idr_id > idr_max(ida->idr.layers)) 355 slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
356 if (!slot)
1023 goto err; 357 goto err;
1024 358
1025 /* clear full bits while looking up the leaf idr_layer */ 359 bitmap = rcu_dereference_raw(*slot);
1026 while ((shift > 0) && p) { 360 if (radix_tree_exception(bitmap)) {
1027 n = (idr_id >> shift) & IDR_MASK; 361 btmp = (unsigned long *)slot;
1028 __clear_bit(n, p->bitmap); 362 offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
1029 p = p->ary[n]; 363 if (offset >= BITS_PER_LONG)
1030 shift -= IDR_BITS; 364 goto err;
365 } else {
366 btmp = bitmap->bitmap;
1031 } 367 }
1032 368 if (!test_bit(offset, btmp))
1033 if (p == NULL)
1034 goto err;
1035
1036 n = idr_id & IDR_MASK;
1037 __clear_bit(n, p->bitmap);
1038
1039 bitmap = (void *)p->ary[n];
1040 if (!bitmap || !test_bit(offset, bitmap->bitmap))
1041 goto err; 369 goto err;
1042 370
1043 /* update bitmap and remove it if empty */ 371 __clear_bit(offset, btmp);
1044 __clear_bit(offset, bitmap->bitmap); 372 radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
1045 if (--bitmap->nr_busy == 0) { 373 if (radix_tree_exception(bitmap)) {
1046 __set_bit(n, p->bitmap); /* to please idr_remove() */ 374 if (rcu_dereference_raw(*slot) ==
1047 idr_remove(&ida->idr, idr_id); 375 (void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
1048 free_bitmap(ida, bitmap); 376 radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
377 } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
378 kfree(bitmap);
379 radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
1049 } 380 }
1050
1051 return; 381 return;
1052
1053 err: 382 err:
1054 WARN(1, "ida_remove called for id=%d which is not allocated.\n", id); 383 WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
1055} 384}
1056EXPORT_SYMBOL(ida_remove); 385EXPORT_SYMBOL(ida_remove);
1057 386
1058/** 387/**
1059 * ida_destroy - release all cached layers within an ida tree 388 * ida_destroy - Free the contents of an ida
1060 * @ida: ida handle 389 * @ida: ida handle
390 *
391 * Calling this function releases all resources associated with an IDA. When
392 * this call returns, the IDA is empty and can be reused or freed. The caller
393 * should not allow ida_remove() or ida_get_new_above() to be called at the
394 * same time.
1061 */ 395 */
1062void ida_destroy(struct ida *ida) 396void ida_destroy(struct ida *ida)
1063{ 397{
1064 idr_destroy(&ida->idr); 398 struct radix_tree_iter iter;
1065 kfree(ida->free_bitmap); 399 void __rcu **slot;
400
401 radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
402 struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
403 if (!radix_tree_exception(bitmap))
404 kfree(bitmap);
405 radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
406 }
1066} 407}
1067EXPORT_SYMBOL(ida_destroy); 408EXPORT_SYMBOL(ida_destroy);
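For instance, a module that kept my_ida around could tear it down on exit without freeing individual IDs first; a sketch, with my_module_exit purely illustrative:

static void __exit my_module_exit(void)
{
	/* Drops every remaining ID and frees the bitmaps backing them. */
	ida_destroy(&my_ida);
}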
1068 409
@@ -1141,18 +482,3 @@ void ida_simple_remove(struct ida *ida, unsigned int id)
1141 spin_unlock_irqrestore(&simple_ida_lock, flags); 482 spin_unlock_irqrestore(&simple_ida_lock, flags);
1142} 483}
1143EXPORT_SYMBOL(ida_simple_remove); 484EXPORT_SYMBOL(ida_simple_remove);
1144
1145/**
1146 * ida_init - initialize ida handle
1147 * @ida: ida handle
1148 *
 1149 * This function is used to set up the handle (@ida) that you will pass
1150 * to the rest of the functions.
1151 */
1152void ida_init(struct ida *ida)
1153{
1154 memset(ida, 0, sizeof(struct ida));
1155 idr_init(&ida->idr);
1156
1157}
1158EXPORT_SYMBOL(ida_init);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 72fab4999c00..5ed506d648c4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -22,20 +22,21 @@
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */ 23 */
24 24
25#include <linux/bitmap.h>
26#include <linux/bitops.h>
25#include <linux/cpu.h> 27#include <linux/cpu.h>
26#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/export.h>
30#include <linux/idr.h>
27#include <linux/init.h> 31#include <linux/init.h>
28#include <linux/kernel.h> 32#include <linux/kernel.h>
29#include <linux/export.h> 33#include <linux/kmemleak.h>
30#include <linux/radix-tree.h>
31#include <linux/percpu.h> 34#include <linux/percpu.h>
35#include <linux/preempt.h> /* in_interrupt() */
36#include <linux/radix-tree.h>
37#include <linux/rcupdate.h>
32#include <linux/slab.h> 38#include <linux/slab.h>
33#include <linux/kmemleak.h>
34#include <linux/cpu.h>
35#include <linux/string.h> 39#include <linux/string.h>
36#include <linux/bitops.h>
37#include <linux/rcupdate.h>
38#include <linux/preempt.h> /* in_interrupt() */
39 40
40 41
41/* Number of nodes in fully populated tree of given height */ 42/* Number of nodes in fully populated tree of given height */
@@ -60,11 +61,28 @@ static struct kmem_cache *radix_tree_node_cachep;
60#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) 61#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
61 62
62/* 63/*
64 * The IDR does not have to be as high as the radix tree since it uses
65 * signed integers, not unsigned longs.
66 */
67#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
68#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
69 RADIX_TREE_MAP_SHIFT))
70#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
71
72/*
73 * The IDA is even shorter since it uses a bitmap at the last level.
74 */
75#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
76#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
77 RADIX_TREE_MAP_SHIFT))
78#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
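As a worked example of these sizes (assuming a 64-bit kernel with the common RADIX_TREE_MAP_SHIFT of 6 and the 1024-bit struct ida_bitmap used by this patch): IDR_INDEX_BITS = 31, so IDR_MAX_PATH = DIV_ROUND_UP(31, 6) = 6 and IDR_PRELOAD_SIZE = 11; IDA_INDEX_BITS = 31 - ilog2(1024) = 21, so IDA_MAX_PATH = 4 and IDA_PRELOAD_SIZE = 7, both comfortably smaller than the RADIX_TREE_PRELOAD_SIZE of 21 needed to cover the full unsigned long index space.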
79
80/*
63 * Per-cpu pool of preloaded nodes 81 * Per-cpu pool of preloaded nodes
64 */ 82 */
65struct radix_tree_preload { 83struct radix_tree_preload {
66 unsigned nr; 84 unsigned nr;
67 /* nodes->private_data points to next preallocated node */ 85 /* nodes->parent points to next preallocated node */
68 struct radix_tree_node *nodes; 86 struct radix_tree_node *nodes;
69}; 87};
70static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; 88static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
@@ -83,35 +101,38 @@ static inline void *node_to_entry(void *ptr)
83 101
84#ifdef CONFIG_RADIX_TREE_MULTIORDER 102#ifdef CONFIG_RADIX_TREE_MULTIORDER
85/* Sibling slots point directly to another slot in the same node */ 103/* Sibling slots point directly to another slot in the same node */
86static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) 104static inline
105bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
87{ 106{
88 void **ptr = node; 107 void __rcu **ptr = node;
89 return (parent->slots <= ptr) && 108 return (parent->slots <= ptr) &&
90 (ptr < parent->slots + RADIX_TREE_MAP_SIZE); 109 (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
91} 110}
92#else 111#else
93static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) 112static inline
113bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
94{ 114{
95 return false; 115 return false;
96} 116}
97#endif 117#endif
98 118
99static inline unsigned long get_slot_offset(struct radix_tree_node *parent, 119static inline unsigned long
100 void **slot) 120get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
101{ 121{
102 return slot - parent->slots; 122 return slot - parent->slots;
103} 123}
104 124
105static unsigned int radix_tree_descend(struct radix_tree_node *parent, 125static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
106 struct radix_tree_node **nodep, unsigned long index) 126 struct radix_tree_node **nodep, unsigned long index)
107{ 127{
108 unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; 128 unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
109 void **entry = rcu_dereference_raw(parent->slots[offset]); 129 void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
110 130
111#ifdef CONFIG_RADIX_TREE_MULTIORDER 131#ifdef CONFIG_RADIX_TREE_MULTIORDER
112 if (radix_tree_is_internal_node(entry)) { 132 if (radix_tree_is_internal_node(entry)) {
113 if (is_sibling_entry(parent, entry)) { 133 if (is_sibling_entry(parent, entry)) {
114 void **sibentry = (void **) entry_to_node(entry); 134 void __rcu **sibentry;
135 sibentry = (void __rcu **) entry_to_node(entry);
115 offset = get_slot_offset(parent, sibentry); 136 offset = get_slot_offset(parent, sibentry);
116 entry = rcu_dereference_raw(*sibentry); 137 entry = rcu_dereference_raw(*sibentry);
117 } 138 }
@@ -122,7 +143,7 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent,
122 return offset; 143 return offset;
123} 144}
124 145
125static inline gfp_t root_gfp_mask(struct radix_tree_root *root) 146static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
126{ 147{
127 return root->gfp_mask & __GFP_BITS_MASK; 148 return root->gfp_mask & __GFP_BITS_MASK;
128} 149}
@@ -139,42 +160,48 @@ static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
139 __clear_bit(offset, node->tags[tag]); 160 __clear_bit(offset, node->tags[tag]);
140} 161}
141 162
142static inline int tag_get(struct radix_tree_node *node, unsigned int tag, 163static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
143 int offset) 164 int offset)
144{ 165{
145 return test_bit(offset, node->tags[tag]); 166 return test_bit(offset, node->tags[tag]);
146} 167}
147 168
148static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) 169static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
149{ 170{
150 root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); 171 root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
151} 172}
152 173
153static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) 174static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
154{ 175{
155 root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); 176 root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
156} 177}
157 178
158static inline void root_tag_clear_all(struct radix_tree_root *root) 179static inline void root_tag_clear_all(struct radix_tree_root *root)
159{ 180{
160 root->gfp_mask &= __GFP_BITS_MASK; 181 root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
182}
183
184static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
185{
186 return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
161} 187}
162 188
163static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) 189static inline unsigned root_tags_get(const struct radix_tree_root *root)
164{ 190{
165 return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); 191 return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
166} 192}
167 193
168static inline unsigned root_tags_get(struct radix_tree_root *root) 194static inline bool is_idr(const struct radix_tree_root *root)
169{ 195{
170 return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT; 196 return !!(root->gfp_mask & ROOT_IS_IDR);
171} 197}
172 198
173/* 199/*
174 * Returns 1 if any slot in the node has this tag set. 200 * Returns 1 if any slot in the node has this tag set.
175 * Otherwise returns 0. 201 * Otherwise returns 0.
176 */ 202 */
177static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) 203static inline int any_tag_set(const struct radix_tree_node *node,
204 unsigned int tag)
178{ 205{
179 unsigned idx; 206 unsigned idx;
180 for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { 207 for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
@@ -184,6 +211,11 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
184 return 0; 211 return 0;
185} 212}
186 213
214static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
215{
216 bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
217}
218
187/** 219/**
188 * radix_tree_find_next_bit - find the next set bit in a memory region 220 * radix_tree_find_next_bit - find the next set bit in a memory region
189 * 221 *
@@ -232,11 +264,18 @@ static inline unsigned long shift_maxindex(unsigned int shift)
232 return (RADIX_TREE_MAP_SIZE << shift) - 1; 264 return (RADIX_TREE_MAP_SIZE << shift) - 1;
233} 265}
234 266
235static inline unsigned long node_maxindex(struct radix_tree_node *node) 267static inline unsigned long node_maxindex(const struct radix_tree_node *node)
236{ 268{
237 return shift_maxindex(node->shift); 269 return shift_maxindex(node->shift);
238} 270}
239 271
272static unsigned long next_index(unsigned long index,
273 const struct radix_tree_node *node,
274 unsigned long offset)
275{
276 return (index & ~node_maxindex(node)) + (offset << node->shift);
277}
278
240#ifndef __KERNEL__ 279#ifndef __KERNEL__
241static void dump_node(struct radix_tree_node *node, unsigned long index) 280static void dump_node(struct radix_tree_node *node, unsigned long index)
242{ 281{
@@ -275,11 +314,59 @@ static void radix_tree_dump(struct radix_tree_root *root)
275{ 314{
276 pr_debug("radix root: %p rnode %p tags %x\n", 315 pr_debug("radix root: %p rnode %p tags %x\n",
277 root, root->rnode, 316 root, root->rnode,
278 root->gfp_mask >> __GFP_BITS_SHIFT); 317 root->gfp_mask >> ROOT_TAG_SHIFT);
279 if (!radix_tree_is_internal_node(root->rnode)) 318 if (!radix_tree_is_internal_node(root->rnode))
280 return; 319 return;
281 dump_node(entry_to_node(root->rnode), 0); 320 dump_node(entry_to_node(root->rnode), 0);
282} 321}
322
323static void dump_ida_node(void *entry, unsigned long index)
324{
325 unsigned long i;
326
327 if (!entry)
328 return;
329
330 if (radix_tree_is_internal_node(entry)) {
331 struct radix_tree_node *node = entry_to_node(entry);
332
333 pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
334 node, node->offset, index * IDA_BITMAP_BITS,
335 ((index | node_maxindex(node)) + 1) *
336 IDA_BITMAP_BITS - 1,
337 node->parent, node->tags[0][0], node->shift,
338 node->count);
339 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
340 dump_ida_node(node->slots[i],
341 index | (i << node->shift));
342 } else if (radix_tree_exceptional_entry(entry)) {
343 pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
344 entry, (int)(index & RADIX_TREE_MAP_MASK),
345 index * IDA_BITMAP_BITS,
346 index * IDA_BITMAP_BITS + BITS_PER_LONG -
347 RADIX_TREE_EXCEPTIONAL_SHIFT,
348 (unsigned long)entry >>
349 RADIX_TREE_EXCEPTIONAL_SHIFT);
350 } else {
351 struct ida_bitmap *bitmap = entry;
352
353 pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
354 (int)(index & RADIX_TREE_MAP_MASK),
355 index * IDA_BITMAP_BITS,
356 (index + 1) * IDA_BITMAP_BITS - 1);
357 for (i = 0; i < IDA_BITMAP_LONGS; i++)
358 pr_cont(" %lx", bitmap->bitmap[i]);
359 pr_cont("\n");
360 }
361}
362
363static void ida_dump(struct ida *ida)
364{
365 struct radix_tree_root *root = &ida->ida_rt;
366 pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
367 root->gfp_mask >> ROOT_TAG_SHIFT);
368 dump_ida_node(root->rnode, 0);
369}
283#endif 370#endif
284 371
285/* 372/*
@@ -287,13 +374,12 @@ static void radix_tree_dump(struct radix_tree_root *root)
287 * that the caller has pinned this thread of control to the current CPU. 374 * that the caller has pinned this thread of control to the current CPU.
288 */ 375 */
289static struct radix_tree_node * 376static struct radix_tree_node *
290radix_tree_node_alloc(struct radix_tree_root *root, 377radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
291 struct radix_tree_node *parent, 378 struct radix_tree_root *root,
292 unsigned int shift, unsigned int offset, 379 unsigned int shift, unsigned int offset,
293 unsigned int count, unsigned int exceptional) 380 unsigned int count, unsigned int exceptional)
294{ 381{
295 struct radix_tree_node *ret = NULL; 382 struct radix_tree_node *ret = NULL;
296 gfp_t gfp_mask = root_gfp_mask(root);
297 383
298 /* 384 /*
299 * Preload code isn't irq safe and it doesn't make sense to use 385 * Preload code isn't irq safe and it doesn't make sense to use
@@ -321,8 +407,7 @@ radix_tree_node_alloc(struct radix_tree_root *root,
321 rtp = this_cpu_ptr(&radix_tree_preloads); 407 rtp = this_cpu_ptr(&radix_tree_preloads);
322 if (rtp->nr) { 408 if (rtp->nr) {
323 ret = rtp->nodes; 409 ret = rtp->nodes;
324 rtp->nodes = ret->private_data; 410 rtp->nodes = ret->parent;
325 ret->private_data = NULL;
326 rtp->nr--; 411 rtp->nr--;
327 } 412 }
328 /* 413 /*
@@ -336,11 +421,12 @@ radix_tree_node_alloc(struct radix_tree_root *root,
336out: 421out:
337 BUG_ON(radix_tree_is_internal_node(ret)); 422 BUG_ON(radix_tree_is_internal_node(ret));
338 if (ret) { 423 if (ret) {
339 ret->parent = parent;
340 ret->shift = shift; 424 ret->shift = shift;
341 ret->offset = offset; 425 ret->offset = offset;
342 ret->count = count; 426 ret->count = count;
343 ret->exceptional = exceptional; 427 ret->exceptional = exceptional;
428 ret->parent = parent;
429 ret->root = root;
344 } 430 }
345 return ret; 431 return ret;
346} 432}
@@ -399,7 +485,7 @@ static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
399 preempt_disable(); 485 preempt_disable();
400 rtp = this_cpu_ptr(&radix_tree_preloads); 486 rtp = this_cpu_ptr(&radix_tree_preloads);
401 if (rtp->nr < nr) { 487 if (rtp->nr < nr) {
402 node->private_data = rtp->nodes; 488 node->parent = rtp->nodes;
403 rtp->nodes = node; 489 rtp->nodes = node;
404 rtp->nr++; 490 rtp->nr++;
405 } else { 491 } else {
@@ -510,7 +596,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
510 return __radix_tree_preload(gfp_mask, nr_nodes); 596 return __radix_tree_preload(gfp_mask, nr_nodes);
511} 597}
512 598
513static unsigned radix_tree_load_root(struct radix_tree_root *root, 599static unsigned radix_tree_load_root(const struct radix_tree_root *root,
514 struct radix_tree_node **nodep, unsigned long *maxindex) 600 struct radix_tree_node **nodep, unsigned long *maxindex)
515{ 601{
516 struct radix_tree_node *node = rcu_dereference_raw(root->rnode); 602 struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
@@ -530,10 +616,10 @@ static unsigned radix_tree_load_root(struct radix_tree_root *root,
530/* 616/*
531 * Extend a radix tree so it can store key @index. 617 * Extend a radix tree so it can store key @index.
532 */ 618 */
533static int radix_tree_extend(struct radix_tree_root *root, 619static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
534 unsigned long index, unsigned int shift) 620 unsigned long index, unsigned int shift)
535{ 621{
536 struct radix_tree_node *slot; 622 void *entry;
537 unsigned int maxshift; 623 unsigned int maxshift;
538 int tag; 624 int tag;
539 625
@@ -542,32 +628,44 @@ static int radix_tree_extend(struct radix_tree_root *root,
542 while (index > shift_maxindex(maxshift)) 628 while (index > shift_maxindex(maxshift))
543 maxshift += RADIX_TREE_MAP_SHIFT; 629 maxshift += RADIX_TREE_MAP_SHIFT;
544 630
545 slot = root->rnode; 631 entry = rcu_dereference_raw(root->rnode);
546 if (!slot) 632 if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
547 goto out; 633 goto out;
548 634
549 do { 635 do {
550 struct radix_tree_node *node = radix_tree_node_alloc(root, 636 struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
551 NULL, shift, 0, 1, 0); 637 root, shift, 0, 1, 0);
552 if (!node) 638 if (!node)
553 return -ENOMEM; 639 return -ENOMEM;
554 640
555 /* Propagate the aggregated tag info into the new root */ 641 if (is_idr(root)) {
556 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { 642 all_tag_set(node, IDR_FREE);
557 if (root_tag_get(root, tag)) 643 if (!root_tag_get(root, IDR_FREE)) {
558 tag_set(node, tag, 0); 644 tag_clear(node, IDR_FREE, 0);
645 root_tag_set(root, IDR_FREE);
646 }
647 } else {
648 /* Propagate the aggregated tag info to the new child */
649 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
650 if (root_tag_get(root, tag))
651 tag_set(node, tag, 0);
652 }
559 } 653 }
560 654
561 BUG_ON(shift > BITS_PER_LONG); 655 BUG_ON(shift > BITS_PER_LONG);
562 if (radix_tree_is_internal_node(slot)) { 656 if (radix_tree_is_internal_node(entry)) {
563 entry_to_node(slot)->parent = node; 657 entry_to_node(entry)->parent = node;
564 } else if (radix_tree_exceptional_entry(slot)) { 658 } else if (radix_tree_exceptional_entry(entry)) {
565 /* Moving an exceptional root->rnode to a node */ 659 /* Moving an exceptional root->rnode to a node */
566 node->exceptional = 1; 660 node->exceptional = 1;
567 } 661 }
568 node->slots[0] = slot; 662 /*
569 slot = node_to_entry(node); 663 * entry was already in the radix tree, so we do not need
570 rcu_assign_pointer(root->rnode, slot); 664 * rcu_assign_pointer here
665 */
666 node->slots[0] = (void __rcu *)entry;
667 entry = node_to_entry(node);
668 rcu_assign_pointer(root->rnode, entry);
571 shift += RADIX_TREE_MAP_SHIFT; 669 shift += RADIX_TREE_MAP_SHIFT;
572 } while (shift <= maxshift); 670 } while (shift <= maxshift);
573out: 671out:
@@ -578,12 +676,14 @@ out:
578 * radix_tree_shrink - shrink radix tree to minimum height 676 * radix_tree_shrink - shrink radix tree to minimum height
579 * @root radix tree root 677 * @root radix tree root
580 */ 678 */
581static inline void radix_tree_shrink(struct radix_tree_root *root, 679static inline bool radix_tree_shrink(struct radix_tree_root *root,
582 radix_tree_update_node_t update_node, 680 radix_tree_update_node_t update_node,
583 void *private) 681 void *private)
584{ 682{
683 bool shrunk = false;
684
585 for (;;) { 685 for (;;) {
586 struct radix_tree_node *node = root->rnode; 686 struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
587 struct radix_tree_node *child; 687 struct radix_tree_node *child;
588 688
589 if (!radix_tree_is_internal_node(node)) 689 if (!radix_tree_is_internal_node(node))
@@ -597,7 +697,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
597 */ 697 */
598 if (node->count != 1) 698 if (node->count != 1)
599 break; 699 break;
600 child = node->slots[0]; 700 child = rcu_dereference_raw(node->slots[0]);
601 if (!child) 701 if (!child)
602 break; 702 break;
603 if (!radix_tree_is_internal_node(child) && node->shift) 703 if (!radix_tree_is_internal_node(child) && node->shift)
@@ -613,7 +713,9 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
613 * (node->slots[0]), it will be safe to dereference the new 713 * (node->slots[0]), it will be safe to dereference the new
614 * one (root->rnode) as far as dependent read barriers go. 714 * one (root->rnode) as far as dependent read barriers go.
615 */ 715 */
616 root->rnode = child; 716 root->rnode = (void __rcu *)child;
717 if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
718 root_tag_clear(root, IDR_FREE);
617 719
618 /* 720 /*
619 * We have a dilemma here. The node's slot[0] must not be 721 * We have a dilemma here. The node's slot[0] must not be
@@ -635,27 +737,34 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
635 */ 737 */
636 node->count = 0; 738 node->count = 0;
637 if (!radix_tree_is_internal_node(child)) { 739 if (!radix_tree_is_internal_node(child)) {
638 node->slots[0] = RADIX_TREE_RETRY; 740 node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
639 if (update_node) 741 if (update_node)
640 update_node(node, private); 742 update_node(node, private);
641 } 743 }
642 744
643 WARN_ON_ONCE(!list_empty(&node->private_list)); 745 WARN_ON_ONCE(!list_empty(&node->private_list));
644 radix_tree_node_free(node); 746 radix_tree_node_free(node);
747 shrunk = true;
645 } 748 }
749
750 return shrunk;
646} 751}
647 752
648static void delete_node(struct radix_tree_root *root, 753static bool delete_node(struct radix_tree_root *root,
649 struct radix_tree_node *node, 754 struct radix_tree_node *node,
650 radix_tree_update_node_t update_node, void *private) 755 radix_tree_update_node_t update_node, void *private)
651{ 756{
757 bool deleted = false;
758
652 do { 759 do {
653 struct radix_tree_node *parent; 760 struct radix_tree_node *parent;
654 761
655 if (node->count) { 762 if (node->count) {
656 if (node == entry_to_node(root->rnode)) 763 if (node_to_entry(node) ==
657 radix_tree_shrink(root, update_node, private); 764 rcu_dereference_raw(root->rnode))
658 return; 765 deleted |= radix_tree_shrink(root, update_node,
766 private);
767 return deleted;
659 } 768 }
660 769
661 parent = node->parent; 770 parent = node->parent;
@@ -663,15 +772,23 @@ static void delete_node(struct radix_tree_root *root,
663 parent->slots[node->offset] = NULL; 772 parent->slots[node->offset] = NULL;
664 parent->count--; 773 parent->count--;
665 } else { 774 } else {
666 root_tag_clear_all(root); 775 /*
776 * Shouldn't the tags already have all been cleared
777 * by the caller?
778 */
779 if (!is_idr(root))
780 root_tag_clear_all(root);
667 root->rnode = NULL; 781 root->rnode = NULL;
668 } 782 }
669 783
670 WARN_ON_ONCE(!list_empty(&node->private_list)); 784 WARN_ON_ONCE(!list_empty(&node->private_list));
671 radix_tree_node_free(node); 785 radix_tree_node_free(node);
786 deleted = true;
672 787
673 node = parent; 788 node = parent;
674 } while (node); 789 } while (node);
790
791 return deleted;
675} 792}
676 793
677/** 794/**
@@ -693,13 +810,14 @@ static void delete_node(struct radix_tree_root *root,
693 */ 810 */
694int __radix_tree_create(struct radix_tree_root *root, unsigned long index, 811int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
695 unsigned order, struct radix_tree_node **nodep, 812 unsigned order, struct radix_tree_node **nodep,
696 void ***slotp) 813 void __rcu ***slotp)
697{ 814{
698 struct radix_tree_node *node = NULL, *child; 815 struct radix_tree_node *node = NULL, *child;
699 void **slot = (void **)&root->rnode; 816 void __rcu **slot = (void __rcu **)&root->rnode;
700 unsigned long maxindex; 817 unsigned long maxindex;
701 unsigned int shift, offset = 0; 818 unsigned int shift, offset = 0;
702 unsigned long max = index | ((1UL << order) - 1); 819 unsigned long max = index | ((1UL << order) - 1);
820 gfp_t gfp = root_gfp_mask(root);
703 821
704 shift = radix_tree_load_root(root, &child, &maxindex); 822 shift = radix_tree_load_root(root, &child, &maxindex);
705 823
@@ -707,18 +825,18 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
707 if (order > 0 && max == ((1UL << order) - 1)) 825 if (order > 0 && max == ((1UL << order) - 1))
708 max++; 826 max++;
709 if (max > maxindex) { 827 if (max > maxindex) {
710 int error = radix_tree_extend(root, max, shift); 828 int error = radix_tree_extend(root, gfp, max, shift);
711 if (error < 0) 829 if (error < 0)
712 return error; 830 return error;
713 shift = error; 831 shift = error;
714 child = root->rnode; 832 child = rcu_dereference_raw(root->rnode);
715 } 833 }
716 834
717 while (shift > order) { 835 while (shift > order) {
718 shift -= RADIX_TREE_MAP_SHIFT; 836 shift -= RADIX_TREE_MAP_SHIFT;
719 if (child == NULL) { 837 if (child == NULL) {
720 /* Have to add a child node. */ 838 /* Have to add a child node. */
721 child = radix_tree_node_alloc(root, node, shift, 839 child = radix_tree_node_alloc(gfp, node, root, shift,
722 offset, 0, 0); 840 offset, 0, 0);
723 if (!child) 841 if (!child)
724 return -ENOMEM; 842 return -ENOMEM;
@@ -741,7 +859,6 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
741 return 0; 859 return 0;
742} 860}
743 861
744#ifdef CONFIG_RADIX_TREE_MULTIORDER
745/* 862/*
746 * Free any nodes below this node. The tree is presumed to not need 863 * Free any nodes below this node. The tree is presumed to not need
747 * shrinking, and any user data in the tree is presumed to not need a 864 * shrinking, and any user data in the tree is presumed to not need a
@@ -757,7 +874,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
757 struct radix_tree_node *child = entry_to_node(node); 874 struct radix_tree_node *child = entry_to_node(node);
758 875
759 for (;;) { 876 for (;;) {
760 void *entry = child->slots[offset]; 877 void *entry = rcu_dereference_raw(child->slots[offset]);
761 if (radix_tree_is_internal_node(entry) && 878 if (radix_tree_is_internal_node(entry) &&
762 !is_sibling_entry(child, entry)) { 879 !is_sibling_entry(child, entry)) {
763 child = entry_to_node(entry); 880 child = entry_to_node(entry);
@@ -777,8 +894,9 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
777 } 894 }
778} 895}
779 896
780static inline int insert_entries(struct radix_tree_node *node, void **slot, 897#ifdef CONFIG_RADIX_TREE_MULTIORDER
781 void *item, unsigned order, bool replace) 898static inline int insert_entries(struct radix_tree_node *node,
899 void __rcu **slot, void *item, unsigned order, bool replace)
782{ 900{
783 struct radix_tree_node *child; 901 struct radix_tree_node *child;
784 unsigned i, n, tag, offset, tags = 0; 902 unsigned i, n, tag, offset, tags = 0;
@@ -813,7 +931,7 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot,
813 } 931 }
814 932
815 for (i = 0; i < n; i++) { 933 for (i = 0; i < n; i++) {
816 struct radix_tree_node *old = slot[i]; 934 struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
817 if (i) { 935 if (i) {
818 rcu_assign_pointer(slot[i], child); 936 rcu_assign_pointer(slot[i], child);
819 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 937 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
@@ -840,8 +958,8 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot,
840 return n; 958 return n;
841} 959}
842#else 960#else
843static inline int insert_entries(struct radix_tree_node *node, void **slot, 961static inline int insert_entries(struct radix_tree_node *node,
844 void *item, unsigned order, bool replace) 962 void __rcu **slot, void *item, unsigned order, bool replace)
845{ 963{
846 if (*slot) 964 if (*slot)
847 return -EEXIST; 965 return -EEXIST;
@@ -868,7 +986,7 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
868 unsigned order, void *item) 986 unsigned order, void *item)
869{ 987{
870 struct radix_tree_node *node; 988 struct radix_tree_node *node;
871 void **slot; 989 void __rcu **slot;
872 int error; 990 int error;
873 991
874 BUG_ON(radix_tree_is_internal_node(item)); 992 BUG_ON(radix_tree_is_internal_node(item));
@@ -908,16 +1026,17 @@ EXPORT_SYMBOL(__radix_tree_insert);
908 * allocated and @root->rnode is used as a direct slot instead of 1026 * allocated and @root->rnode is used as a direct slot instead of
909 * pointing to a node, in which case *@nodep will be NULL. 1027 * pointing to a node, in which case *@nodep will be NULL.
910 */ 1028 */
911void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, 1029void *__radix_tree_lookup(const struct radix_tree_root *root,
912 struct radix_tree_node **nodep, void ***slotp) 1030 unsigned long index, struct radix_tree_node **nodep,
1031 void __rcu ***slotp)
913{ 1032{
914 struct radix_tree_node *node, *parent; 1033 struct radix_tree_node *node, *parent;
915 unsigned long maxindex; 1034 unsigned long maxindex;
916 void **slot; 1035 void __rcu **slot;
917 1036
918 restart: 1037 restart:
919 parent = NULL; 1038 parent = NULL;
920 slot = (void **)&root->rnode; 1039 slot = (void __rcu **)&root->rnode;
921 radix_tree_load_root(root, &node, &maxindex); 1040 radix_tree_load_root(root, &node, &maxindex);
922 if (index > maxindex) 1041 if (index > maxindex)
923 return NULL; 1042 return NULL;
@@ -952,9 +1071,10 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
952 * exclusive from other writers. Any dereference of the slot must be done 1071 * exclusive from other writers. Any dereference of the slot must be done
953 * using radix_tree_deref_slot. 1072 * using radix_tree_deref_slot.
954 */ 1073 */
955void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) 1074void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
1075 unsigned long index)
956{ 1076{
957 void **slot; 1077 void __rcu **slot;
958 1078
959 if (!__radix_tree_lookup(root, index, NULL, &slot)) 1079 if (!__radix_tree_lookup(root, index, NULL, &slot))
960 return NULL; 1080 return NULL;
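The comment above radix_tree_lookup_slot() requires that any returned slot be dereferenced with radix_tree_deref_slot(). A minimal lookup sketch under the RCU read lock (the tree and index names here are illustrative, not part of the patch):

        void __rcu **slot;
        void *entry = NULL;

        rcu_read_lock();
        slot = radix_tree_lookup_slot(&my_tree, index);
        if (slot) {
                entry = radix_tree_deref_slot(slot);
                if (radix_tree_deref_retry(entry))
                        entry = NULL;   /* tree changed under us; restart the lookup */
        }
        rcu_read_unlock();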
@@ -974,75 +1094,76 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
974 * them safely). No RCU barriers are required to access or modify the 1094 * them safely). No RCU barriers are required to access or modify the
975 * returned item, however. 1095 * returned item, however.
976 */ 1096 */
977void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) 1097void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
978{ 1098{
979 return __radix_tree_lookup(root, index, NULL, NULL); 1099 return __radix_tree_lookup(root, index, NULL, NULL);
980} 1100}
981EXPORT_SYMBOL(radix_tree_lookup); 1101EXPORT_SYMBOL(radix_tree_lookup);
982 1102
983static inline int slot_count(struct radix_tree_node *node, 1103static inline void replace_sibling_entries(struct radix_tree_node *node,
984 void **slot) 1104 void __rcu **slot, int count, int exceptional)
985{ 1105{
986 int n = 1;
987#ifdef CONFIG_RADIX_TREE_MULTIORDER 1106#ifdef CONFIG_RADIX_TREE_MULTIORDER
988 void *ptr = node_to_entry(slot); 1107 void *ptr = node_to_entry(slot);
989 unsigned offset = get_slot_offset(node, slot); 1108 unsigned offset = get_slot_offset(node, slot) + 1;
990 int i;
991 1109
992 for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { 1110 while (offset < RADIX_TREE_MAP_SIZE) {
993 if (node->slots[offset + i] != ptr) 1111 if (rcu_dereference_raw(node->slots[offset]) != ptr)
994 break; 1112 break;
995 n++; 1113 if (count < 0) {
1114 node->slots[offset] = NULL;
1115 node->count--;
1116 }
1117 node->exceptional += exceptional;
1118 offset++;
996 } 1119 }
997#endif 1120#endif
998 return n;
999} 1121}
1000 1122
1001static void replace_slot(struct radix_tree_root *root, 1123static void replace_slot(void __rcu **slot, void *item,
1002 struct radix_tree_node *node, 1124 struct radix_tree_node *node, int count, int exceptional)
1003 void **slot, void *item,
1004 bool warn_typeswitch)
1005{ 1125{
1006 void *old = rcu_dereference_raw(*slot); 1126 if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
1007 int count, exceptional; 1127 return;
1008
1009 WARN_ON_ONCE(radix_tree_is_internal_node(item));
1010
1011 count = !!item - !!old;
1012 exceptional = !!radix_tree_exceptional_entry(item) -
1013 !!radix_tree_exceptional_entry(old);
1014
1015 WARN_ON_ONCE(warn_typeswitch && (count || exceptional));
1016 1128
1017 if (node) { 1129 if (node && (count || exceptional)) {
1018 node->count += count; 1130 node->count += count;
1019 if (exceptional) { 1131 node->exceptional += exceptional;
1020 exceptional *= slot_count(node, slot); 1132 replace_sibling_entries(node, slot, count, exceptional);
1021 node->exceptional += exceptional;
1022 }
1023 } 1133 }
1024 1134
1025 rcu_assign_pointer(*slot, item); 1135 rcu_assign_pointer(*slot, item);
1026} 1136}
1027 1137
1028static inline void delete_sibling_entries(struct radix_tree_node *node, 1138static bool node_tag_get(const struct radix_tree_root *root,
1029 void **slot) 1139 const struct radix_tree_node *node,
1140 unsigned int tag, unsigned int offset)
1030{ 1141{
1031#ifdef CONFIG_RADIX_TREE_MULTIORDER 1142 if (node)
1032 bool exceptional = radix_tree_exceptional_entry(*slot); 1143 return tag_get(node, tag, offset);
1033 void *ptr = node_to_entry(slot); 1144 return root_tag_get(root, tag);
1034 unsigned offset = get_slot_offset(node, slot); 1145}
1035 int i;
1036 1146
1037 for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { 1147/*
1038 if (node->slots[offset + i] != ptr) 1148 * IDR users want to be able to store NULL in the tree, so if the slot isn't
1039 break; 1149 * free, don't adjust the count, even if it's transitioning between NULL and
1040 node->slots[offset + i] = NULL; 1150 * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
1041 node->count--; 1151 * have empty bits, but it only stores NULL in slots when they're being
1042 if (exceptional) 1152 * deleted.
1043 node->exceptional--; 1153 */
1154static int calculate_count(struct radix_tree_root *root,
1155 struct radix_tree_node *node, void __rcu **slot,
1156 void *item, void *old)
1157{
1158 if (is_idr(root)) {
1159 unsigned offset = get_slot_offset(node, slot);
1160 bool free = node_tag_get(root, node, IDR_FREE, offset);
1161 if (!free)
1162 return 0;
1163 if (!old)
1164 return 1;
1044 } 1165 }
1045#endif 1166 return !!item - !!old;
1046} 1167}
1047 1168
1048/** 1169/**
@@ -1059,18 +1180,22 @@ static inline void delete_sibling_entries(struct radix_tree_node *node,
1059 */ 1180 */
1060void __radix_tree_replace(struct radix_tree_root *root, 1181void __radix_tree_replace(struct radix_tree_root *root,
1061 struct radix_tree_node *node, 1182 struct radix_tree_node *node,
1062 void **slot, void *item, 1183 void __rcu **slot, void *item,
1063 radix_tree_update_node_t update_node, void *private) 1184 radix_tree_update_node_t update_node, void *private)
1064{ 1185{
1065 if (!item) 1186 void *old = rcu_dereference_raw(*slot);
1066 delete_sibling_entries(node, slot); 1187 int exceptional = !!radix_tree_exceptional_entry(item) -
1188 !!radix_tree_exceptional_entry(old);
1189 int count = calculate_count(root, node, slot, item, old);
1190
1067 /* 1191 /*
1068 * This function supports replacing exceptional entries and 1192 * This function supports replacing exceptional entries and
1069 * deleting entries, but that needs accounting against the 1193 * deleting entries, but that needs accounting against the
1070 * node unless the slot is root->rnode. 1194 * node unless the slot is root->rnode.
1071 */ 1195 */
1072 replace_slot(root, node, slot, item, 1196 WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) &&
1073 !node && slot != (void **)&root->rnode); 1197 (count || exceptional));
1198 replace_slot(slot, item, node, count, exceptional);
1074 1199
1075 if (!node) 1200 if (!node)
1076 return; 1201 return;
@@ -1098,9 +1223,9 @@ void __radix_tree_replace(struct radix_tree_root *root,
1098 * radix_tree_iter_replace(). 1223 * radix_tree_iter_replace().
1099 */ 1224 */
1100void radix_tree_replace_slot(struct radix_tree_root *root, 1225void radix_tree_replace_slot(struct radix_tree_root *root,
1101 void **slot, void *item) 1226 void __rcu **slot, void *item)
1102{ 1227{
1103 replace_slot(root, NULL, slot, item, true); 1228 __radix_tree_replace(root, NULL, slot, item, NULL, NULL);
1104} 1229}
1105EXPORT_SYMBOL(radix_tree_replace_slot); 1230EXPORT_SYMBOL(radix_tree_replace_slot);
1106 1231
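After this change radix_tree_replace_slot() funnels through __radix_tree_replace(), so the node accounting is kept consistent; as before, the caller must exclude other writers. A sketch of replacing an entry in place (my_tree, my_lock and new_item are illustrative names):

        void __rcu **slot;

        spin_lock(&my_lock);
        slot = radix_tree_lookup_slot(&my_tree, index);
        if (slot)
                radix_tree_replace_slot(&my_tree, slot, new_item);
        spin_unlock(&my_lock);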
@@ -1114,7 +1239,8 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
1114 * Caller must hold tree write locked across split and replacement. 1239 * Caller must hold tree write locked across split and replacement.
1115 */ 1240 */
1116void radix_tree_iter_replace(struct radix_tree_root *root, 1241void radix_tree_iter_replace(struct radix_tree_root *root,
1117 const struct radix_tree_iter *iter, void **slot, void *item) 1242 const struct radix_tree_iter *iter,
1243 void __rcu **slot, void *item)
1118{ 1244{
1119 __radix_tree_replace(root, iter->node, slot, item, NULL, NULL); 1245 __radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
1120} 1246}
@@ -1138,7 +1264,7 @@ int radix_tree_join(struct radix_tree_root *root, unsigned long index,
1138 unsigned order, void *item) 1264 unsigned order, void *item)
1139{ 1265{
1140 struct radix_tree_node *node; 1266 struct radix_tree_node *node;
1141 void **slot; 1267 void __rcu **slot;
1142 int error; 1268 int error;
1143 1269
1144 BUG_ON(radix_tree_is_internal_node(item)); 1270 BUG_ON(radix_tree_is_internal_node(item));
@@ -1173,9 +1299,10 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1173 unsigned order) 1299 unsigned order)
1174{ 1300{
1175 struct radix_tree_node *parent, *node, *child; 1301 struct radix_tree_node *parent, *node, *child;
1176 void **slot; 1302 void __rcu **slot;
1177 unsigned int offset, end; 1303 unsigned int offset, end;
1178 unsigned n, tag, tags = 0; 1304 unsigned n, tag, tags = 0;
1305 gfp_t gfp = root_gfp_mask(root);
1179 1306
1180 if (!__radix_tree_lookup(root, index, &parent, &slot)) 1307 if (!__radix_tree_lookup(root, index, &parent, &slot))
1181 return -ENOENT; 1308 return -ENOENT;
@@ -1189,7 +1316,8 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1189 tags |= 1 << tag; 1316 tags |= 1 << tag;
1190 1317
1191 for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) { 1318 for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
1192 if (!is_sibling_entry(parent, parent->slots[end])) 1319 if (!is_sibling_entry(parent,
1320 rcu_dereference_raw(parent->slots[end])))
1193 break; 1321 break;
1194 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 1322 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1195 if (tags & (1 << tag)) 1323 if (tags & (1 << tag))
@@ -1213,14 +1341,15 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1213 1341
1214 for (;;) { 1342 for (;;) {
1215 if (node->shift > order) { 1343 if (node->shift > order) {
1216 child = radix_tree_node_alloc(root, node, 1344 child = radix_tree_node_alloc(gfp, node, root,
1217 node->shift - RADIX_TREE_MAP_SHIFT, 1345 node->shift - RADIX_TREE_MAP_SHIFT,
1218 offset, 0, 0); 1346 offset, 0, 0);
1219 if (!child) 1347 if (!child)
1220 goto nomem; 1348 goto nomem;
1221 if (node != parent) { 1349 if (node != parent) {
1222 node->count++; 1350 node->count++;
1223 node->slots[offset] = node_to_entry(child); 1351 rcu_assign_pointer(node->slots[offset],
1352 node_to_entry(child));
1224 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 1353 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1225 if (tags & (1 << tag)) 1354 if (tags & (1 << tag))
1226 tag_set(node, tag, offset); 1355 tag_set(node, tag, offset);
@@ -1262,6 +1391,22 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1262} 1391}
1263#endif 1392#endif
1264 1393
1394static void node_tag_set(struct radix_tree_root *root,
1395 struct radix_tree_node *node,
1396 unsigned int tag, unsigned int offset)
1397{
1398 while (node) {
1399 if (tag_get(node, tag, offset))
1400 return;
1401 tag_set(node, tag, offset);
1402 offset = node->offset;
1403 node = node->parent;
1404 }
1405
1406 if (!root_tag_get(root, tag))
1407 root_tag_set(root, tag);
1408}
1409
1265/** 1410/**
1266 * radix_tree_tag_set - set a tag on a radix tree node 1411 * radix_tree_tag_set - set a tag on a radix tree node
1267 * @root: radix tree root 1412 * @root: radix tree root
@@ -1303,6 +1448,18 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
1303} 1448}
1304EXPORT_SYMBOL(radix_tree_tag_set); 1449EXPORT_SYMBOL(radix_tree_tag_set);
1305 1450
1451/**
1452 * radix_tree_iter_tag_set - set a tag on the current iterator entry
1453 * @root: radix tree root
1454 * @iter: iterator state
1455 * @tag: tag to set
1456 */
1457void radix_tree_iter_tag_set(struct radix_tree_root *root,
1458 const struct radix_tree_iter *iter, unsigned int tag)
1459{
1460 node_tag_set(root, iter->node, tag, iter_offset(iter));
1461}
1462
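radix_tree_iter_tag_set() sets a tag starting from the node the iterator already holds, avoiding the top-down walk that radix_tree_tag_set() performs for an index. A sketch that tags every entry visited during an iteration (the tree name and the use of tag 0 are illustrative):

        struct radix_tree_iter iter;
        void __rcu **slot;

        radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
                radix_tree_iter_tag_set(&my_tree, &iter, 0);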
1306static void node_tag_clear(struct radix_tree_root *root, 1463static void node_tag_clear(struct radix_tree_root *root,
1307 struct radix_tree_node *node, 1464 struct radix_tree_node *node,
1308 unsigned int tag, unsigned int offset) 1465 unsigned int tag, unsigned int offset)
@@ -1323,34 +1480,6 @@ static void node_tag_clear(struct radix_tree_root *root,
1323 root_tag_clear(root, tag); 1480 root_tag_clear(root, tag);
1324} 1481}
1325 1482
1326static void node_tag_set(struct radix_tree_root *root,
1327 struct radix_tree_node *node,
1328 unsigned int tag, unsigned int offset)
1329{
1330 while (node) {
1331 if (tag_get(node, tag, offset))
1332 return;
1333 tag_set(node, tag, offset);
1334 offset = node->offset;
1335 node = node->parent;
1336 }
1337
1338 if (!root_tag_get(root, tag))
1339 root_tag_set(root, tag);
1340}
1341
1342/**
1343 * radix_tree_iter_tag_set - set a tag on the current iterator entry
1344 * @root: radix tree root
1345 * @iter: iterator state
1346 * @tag: tag to set
1347 */
1348void radix_tree_iter_tag_set(struct radix_tree_root *root,
1349 const struct radix_tree_iter *iter, unsigned int tag)
1350{
1351 node_tag_set(root, iter->node, tag, iter_offset(iter));
1352}
1353
1354/** 1483/**
1355 * radix_tree_tag_clear - clear a tag on a radix tree node 1484 * radix_tree_tag_clear - clear a tag on a radix tree node
1356 * @root: radix tree root 1485 * @root: radix tree root
@@ -1391,6 +1520,18 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
1391EXPORT_SYMBOL(radix_tree_tag_clear); 1520EXPORT_SYMBOL(radix_tree_tag_clear);
1392 1521
1393/** 1522/**
1523 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
1524 * @root: radix tree root
1525 * @iter: iterator state
1526 * @tag: tag to clear
1527 */
1528void radix_tree_iter_tag_clear(struct radix_tree_root *root,
1529 const struct radix_tree_iter *iter, unsigned int tag)
1530{
1531 node_tag_clear(root, iter->node, tag, iter_offset(iter));
1532}
1533
1534/**
1394 * radix_tree_tag_get - get a tag on a radix tree node 1535 * radix_tree_tag_get - get a tag on a radix tree node
1395 * @root: radix tree root 1536 * @root: radix tree root
1396 * @index: index key 1537 * @index: index key
@@ -1405,7 +1546,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
1405 * the RCU lock is held, unless tag modification and node deletion are excluded 1546 * the RCU lock is held, unless tag modification and node deletion are excluded
1406 * from concurrency. 1547 * from concurrency.
1407 */ 1548 */
1408int radix_tree_tag_get(struct radix_tree_root *root, 1549int radix_tree_tag_get(const struct radix_tree_root *root,
1409 unsigned long index, unsigned int tag) 1550 unsigned long index, unsigned int tag)
1410{ 1551{
1411 struct radix_tree_node *node, *parent; 1552 struct radix_tree_node *node, *parent;
@@ -1417,8 +1558,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
1417 radix_tree_load_root(root, &node, &maxindex); 1558 radix_tree_load_root(root, &node, &maxindex);
1418 if (index > maxindex) 1559 if (index > maxindex)
1419 return 0; 1560 return 0;
1420 if (node == NULL)
1421 return 0;
1422 1561
1423 while (radix_tree_is_internal_node(node)) { 1562 while (radix_tree_is_internal_node(node)) {
1424 unsigned offset; 1563 unsigned offset;
@@ -1426,8 +1565,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
1426 parent = entry_to_node(node); 1565 parent = entry_to_node(node);
1427 offset = radix_tree_descend(parent, &node, index); 1566 offset = radix_tree_descend(parent, &node, index);
1428 1567
1429 if (!node)
1430 return 0;
1431 if (!tag_get(parent, tag, offset)) 1568 if (!tag_get(parent, tag, offset))
1432 return 0; 1569 return 0;
1433 if (node == RADIX_TREE_RETRY) 1570 if (node == RADIX_TREE_RETRY)
@@ -1454,6 +1591,11 @@ static void set_iter_tags(struct radix_tree_iter *iter,
1454 unsigned tag_long = offset / BITS_PER_LONG; 1591 unsigned tag_long = offset / BITS_PER_LONG;
1455 unsigned tag_bit = offset % BITS_PER_LONG; 1592 unsigned tag_bit = offset % BITS_PER_LONG;
1456 1593
1594 if (!node) {
1595 iter->tags = 1;
1596 return;
1597 }
1598
1457 iter->tags = node->tags[tag][tag_long] >> tag_bit; 1599 iter->tags = node->tags[tag][tag_long] >> tag_bit;
1458 1600
1459 /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ 1601 /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
@@ -1468,8 +1610,8 @@ static void set_iter_tags(struct radix_tree_iter *iter,
1468} 1610}
1469 1611
1470#ifdef CONFIG_RADIX_TREE_MULTIORDER 1612#ifdef CONFIG_RADIX_TREE_MULTIORDER
1471static void **skip_siblings(struct radix_tree_node **nodep, 1613static void __rcu **skip_siblings(struct radix_tree_node **nodep,
1472 void **slot, struct radix_tree_iter *iter) 1614 void __rcu **slot, struct radix_tree_iter *iter)
1473{ 1615{
1474 void *sib = node_to_entry(slot - 1); 1616 void *sib = node_to_entry(slot - 1);
1475 1617
@@ -1486,8 +1628,8 @@ static void **skip_siblings(struct radix_tree_node **nodep,
1486 return NULL; 1628 return NULL;
1487} 1629}
1488 1630
1489void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, 1631void __rcu **__radix_tree_next_slot(void __rcu **slot,
1490 unsigned flags) 1632 struct radix_tree_iter *iter, unsigned flags)
1491{ 1633{
1492 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; 1634 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
1493 struct radix_tree_node *node = rcu_dereference_raw(*slot); 1635 struct radix_tree_node *node = rcu_dereference_raw(*slot);
@@ -1540,20 +1682,20 @@ void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
1540} 1682}
1541EXPORT_SYMBOL(__radix_tree_next_slot); 1683EXPORT_SYMBOL(__radix_tree_next_slot);
1542#else 1684#else
1543static void **skip_siblings(struct radix_tree_node **nodep, 1685static void __rcu **skip_siblings(struct radix_tree_node **nodep,
1544 void **slot, struct radix_tree_iter *iter) 1686 void __rcu **slot, struct radix_tree_iter *iter)
1545{ 1687{
1546 return slot; 1688 return slot;
1547} 1689}
1548#endif 1690#endif
1549 1691
1550void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter) 1692void __rcu **radix_tree_iter_resume(void __rcu **slot,
1693 struct radix_tree_iter *iter)
1551{ 1694{
1552 struct radix_tree_node *node; 1695 struct radix_tree_node *node;
1553 1696
1554 slot++; 1697 slot++;
1555 iter->index = __radix_tree_iter_add(iter, 1); 1698 iter->index = __radix_tree_iter_add(iter, 1);
1556 node = rcu_dereference_raw(*slot);
1557 skip_siblings(&node, slot, iter); 1699 skip_siblings(&node, slot, iter);
1558 iter->next_index = iter->index; 1700 iter->next_index = iter->index;
1559 iter->tags = 0; 1701 iter->tags = 0;
@@ -1569,7 +1711,7 @@ EXPORT_SYMBOL(radix_tree_iter_resume);
1569 * @flags: RADIX_TREE_ITER_* flags and tag index 1711 * @flags: RADIX_TREE_ITER_* flags and tag index
1570 * Returns: pointer to chunk first slot, or NULL if iteration is over 1712 * Returns: pointer to chunk first slot, or NULL if iteration is over
1571 */ 1713 */
1572void **radix_tree_next_chunk(struct radix_tree_root *root, 1714void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
1573 struct radix_tree_iter *iter, unsigned flags) 1715 struct radix_tree_iter *iter, unsigned flags)
1574{ 1716{
1575 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; 1717 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
@@ -1606,7 +1748,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
1606 iter->tags = 1; 1748 iter->tags = 1;
1607 iter->node = NULL; 1749 iter->node = NULL;
1608 __set_iter_shift(iter, 0); 1750 __set_iter_shift(iter, 0);
1609 return (void **)&root->rnode; 1751 return (void __rcu **)&root->rnode;
1610 } 1752 }
1611 1753
1612 do { 1754 do {
@@ -1624,7 +1766,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
1624 offset + 1); 1766 offset + 1);
1625 else 1767 else
1626 while (++offset < RADIX_TREE_MAP_SIZE) { 1768 while (++offset < RADIX_TREE_MAP_SIZE) {
1627 void *slot = node->slots[offset]; 1769 void *slot = rcu_dereference_raw(
1770 node->slots[offset]);
1628 if (is_sibling_entry(node, slot)) 1771 if (is_sibling_entry(node, slot))
1629 continue; 1772 continue;
1630 if (slot) 1773 if (slot)
@@ -1680,11 +1823,11 @@ EXPORT_SYMBOL(radix_tree_next_chunk);
1680 * stored in 'results'. 1823 * stored in 'results'.
1681 */ 1824 */
1682unsigned int 1825unsigned int
1683radix_tree_gang_lookup(struct radix_tree_root *root, void **results, 1826radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
1684 unsigned long first_index, unsigned int max_items) 1827 unsigned long first_index, unsigned int max_items)
1685{ 1828{
1686 struct radix_tree_iter iter; 1829 struct radix_tree_iter iter;
1687 void **slot; 1830 void __rcu **slot;
1688 unsigned int ret = 0; 1831 unsigned int ret = 0;
1689 1832
1690 if (unlikely(!max_items)) 1833 if (unlikely(!max_items))
@@ -1725,12 +1868,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
1725 * protection, radix_tree_deref_slot may fail requiring a retry. 1868 * protection, radix_tree_deref_slot may fail requiring a retry.
1726 */ 1869 */
1727unsigned int 1870unsigned int
1728radix_tree_gang_lookup_slot(struct radix_tree_root *root, 1871radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
1729 void ***results, unsigned long *indices, 1872 void __rcu ***results, unsigned long *indices,
1730 unsigned long first_index, unsigned int max_items) 1873 unsigned long first_index, unsigned int max_items)
1731{ 1874{
1732 struct radix_tree_iter iter; 1875 struct radix_tree_iter iter;
1733 void **slot; 1876 void __rcu **slot;
1734 unsigned int ret = 0; 1877 unsigned int ret = 0;
1735 1878
1736 if (unlikely(!max_items)) 1879 if (unlikely(!max_items))
@@ -1762,12 +1905,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
1762 * returns the number of items which were placed at *@results. 1905 * returns the number of items which were placed at *@results.
1763 */ 1906 */
1764unsigned int 1907unsigned int
1765radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, 1908radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
1766 unsigned long first_index, unsigned int max_items, 1909 unsigned long first_index, unsigned int max_items,
1767 unsigned int tag) 1910 unsigned int tag)
1768{ 1911{
1769 struct radix_tree_iter iter; 1912 struct radix_tree_iter iter;
1770 void **slot; 1913 void __rcu **slot;
1771 unsigned int ret = 0; 1914 unsigned int ret = 0;
1772 1915
1773 if (unlikely(!max_items)) 1916 if (unlikely(!max_items))
@@ -1803,12 +1946,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
1803 * returns the number of slots which were placed at *@results. 1946 * returns the number of slots which were placed at *@results.
1804 */ 1947 */
1805unsigned int 1948unsigned int
1806radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, 1949radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
1807 unsigned long first_index, unsigned int max_items, 1950 void __rcu ***results, unsigned long first_index,
1808 unsigned int tag) 1951 unsigned int max_items, unsigned int tag)
1809{ 1952{
1810 struct radix_tree_iter iter; 1953 struct radix_tree_iter iter;
1811 void **slot; 1954 void __rcu **slot;
1812 unsigned int ret = 0; 1955 unsigned int ret = 0;
1813 1956
1814 if (unlikely(!max_items)) 1957 if (unlikely(!max_items))
@@ -1843,59 +1986,83 @@ void __radix_tree_delete_node(struct radix_tree_root *root,
1843 delete_node(root, node, update_node, private); 1986 delete_node(root, node, update_node, private);
1844} 1987}
1845 1988
1989static bool __radix_tree_delete(struct radix_tree_root *root,
1990 struct radix_tree_node *node, void __rcu **slot)
1991{
1992 void *old = rcu_dereference_raw(*slot);
1993 int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
1994 unsigned offset = get_slot_offset(node, slot);
1995 int tag;
1996
1997 if (is_idr(root))
1998 node_tag_set(root, node, IDR_FREE, offset);
1999 else
2000 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
2001 node_tag_clear(root, node, tag, offset);
2002
2003 replace_slot(slot, NULL, node, -1, exceptional);
2004 return node && delete_node(root, node, NULL, NULL);
2005}
2006
1846/** 2007/**
1847 * radix_tree_delete_item - delete an item from a radix tree 2008 * radix_tree_iter_delete - delete the entry at this iterator position
1848 * @root: radix tree root 2009 * @root: radix tree root
1849 * @index: index key 2010 * @iter: iterator state
1850 * @item: expected item 2011 * @slot: pointer to slot
1851 * 2012 *
1852 * Remove @item at @index from the radix tree rooted at @root. 2013 * Delete the entry at the position currently pointed to by the iterator.
2014 * This may result in the current node being freed; if it is, the iterator
2015 * is advanced so that it will not reference the freed memory. This
2016 * function may be called without any locking if there are no other threads
2017 * which can access this tree.
2018 */
2019void radix_tree_iter_delete(struct radix_tree_root *root,
2020 struct radix_tree_iter *iter, void __rcu **slot)
2021{
2022 if (__radix_tree_delete(root, iter->node, slot))
2023 iter->index = iter->next_index;
2024}
2025
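Because radix_tree_iter_delete() advances the iterator when it frees the current node, it is safe to call from inside an iteration. A sketch that empties a tree this way, assuming no concurrent users (my_tree is an illustrative name):

        struct radix_tree_iter iter;
        void __rcu **slot;

        radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
                radix_tree_iter_delete(&my_tree, &iter, slot);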
2026/**
2027 * radix_tree_delete_item - delete an item from a radix tree
2028 * @root: radix tree root
2029 * @index: index key
2030 * @item: expected item
1853 * 2031 *
1854 * Returns the address of the deleted item, or NULL if it was not present 2032 * Remove @item at @index from the radix tree rooted at @root.
1855 * or the entry at the given @index was not @item. 2033 *
2034 * Return: the deleted entry, or %NULL if it was not present
2035 * or the entry at the given @index was not @item.
1856 */ 2036 */
1857void *radix_tree_delete_item(struct radix_tree_root *root, 2037void *radix_tree_delete_item(struct radix_tree_root *root,
1858 unsigned long index, void *item) 2038 unsigned long index, void *item)
1859{ 2039{
1860 struct radix_tree_node *node; 2040 struct radix_tree_node *node = NULL;
1861 unsigned int offset; 2041 void __rcu **slot;
1862 void **slot;
1863 void *entry; 2042 void *entry;
1864 int tag;
1865 2043
1866 entry = __radix_tree_lookup(root, index, &node, &slot); 2044 entry = __radix_tree_lookup(root, index, &node, &slot);
1867 if (!entry) 2045 if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
2046 get_slot_offset(node, slot))))
1868 return NULL; 2047 return NULL;
1869 2048
1870 if (item && entry != item) 2049 if (item && entry != item)
1871 return NULL; 2050 return NULL;
1872 2051
1873 if (!node) { 2052 __radix_tree_delete(root, node, slot);
1874 root_tag_clear_all(root);
1875 root->rnode = NULL;
1876 return entry;
1877 }
1878
1879 offset = get_slot_offset(node, slot);
1880
1881 /* Clear all tags associated with the item to be deleted. */
1882 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1883 node_tag_clear(root, node, tag, offset);
1884
1885 __radix_tree_replace(root, node, slot, NULL, NULL, NULL);
1886 2053
1887 return entry; 2054 return entry;
1888} 2055}
1889EXPORT_SYMBOL(radix_tree_delete_item); 2056EXPORT_SYMBOL(radix_tree_delete_item);
1890 2057
1891/** 2058/**
1892 * radix_tree_delete - delete an item from a radix tree 2059 * radix_tree_delete - delete an entry from a radix tree
1893 * @root: radix tree root 2060 * @root: radix tree root
1894 * @index: index key 2061 * @index: index key
1895 * 2062 *
1896 * Remove the item at @index from the radix tree rooted at @root. 2063 * Remove the entry at @index from the radix tree rooted at @root.
1897 * 2064 *
1898 * Returns the address of the deleted item, or NULL if it was not present. 2065 * Return: The deleted entry, or %NULL if it was not present.
1899 */ 2066 */
1900void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) 2067void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1901{ 2068{
@@ -1905,15 +2072,14 @@ EXPORT_SYMBOL(radix_tree_delete);
1905 2072
1906void radix_tree_clear_tags(struct radix_tree_root *root, 2073void radix_tree_clear_tags(struct radix_tree_root *root,
1907 struct radix_tree_node *node, 2074 struct radix_tree_node *node,
1908 void **slot) 2075 void __rcu **slot)
1909{ 2076{
1910 if (node) { 2077 if (node) {
1911 unsigned int tag, offset = get_slot_offset(node, slot); 2078 unsigned int tag, offset = get_slot_offset(node, slot);
1912 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 2079 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1913 node_tag_clear(root, node, tag, offset); 2080 node_tag_clear(root, node, tag, offset);
1914 } else { 2081 } else {
1915 /* Clear root node tags */ 2082 root_tag_clear_all(root);
1916 root->gfp_mask &= __GFP_BITS_MASK;
1917 } 2083 }
1918} 2084}
1919 2085
@@ -1922,12 +2088,147 @@ void radix_tree_clear_tags(struct radix_tree_root *root,
1922 * @root: radix tree root 2088 * @root: radix tree root
1923 * @tag: tag to test 2089 * @tag: tag to test
1924 */ 2090 */
1925int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) 2091int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
1926{ 2092{
1927 return root_tag_get(root, tag); 2093 return root_tag_get(root, tag);
1928} 2094}
1929EXPORT_SYMBOL(radix_tree_tagged); 2095EXPORT_SYMBOL(radix_tree_tagged);
1930 2096
2097/**
2098 * idr_preload - preload for idr_alloc()
2099 * @gfp_mask: allocation mask to use for preloading
2100 *
2101 * Preallocate memory to use for the next call to idr_alloc(). This function
2102 * returns with preemption disabled. It will be enabled by idr_preload_end().
2103 */
2104void idr_preload(gfp_t gfp_mask)
2105{
2106 __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
2107}
2108EXPORT_SYMBOL(idr_preload);
2109
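The usual pattern, which idr_nowait_test() in the new test suite below also follows, is to preload outside the lock and then allocate with GFP_NOWAIT so that idr_alloc() itself never sleeps (the lock and idr names are illustrative):

        int id;

        idr_preload(GFP_KERNEL);
        spin_lock(&my_lock);
        id = idr_alloc(&my_idr, ptr, 0, 0, GFP_NOWAIT);
        spin_unlock(&my_lock);
        idr_preload_end();
        if (id < 0)
                return id;      /* -ENOMEM or -ENOSPC */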
2110/**
2111 * ida_pre_get - reserve resources for ida allocation
2112 * @ida: ida handle
2113 * @gfp: memory allocation flags
2114 *
2115 * This function should be called before calling ida_get_new_above(). If it
2116 * is unable to allocate memory, it will return %0. On success, it returns %1.
2117 */
2118int ida_pre_get(struct ida *ida, gfp_t gfp)
2119{
2120 __radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
2121 /*
2122 * The IDA API has no preload_end() equivalent. Instead,
2123 * ida_get_new() can return -EAGAIN, prompting the caller
2124 * to return to the ida_pre_get() step.
2125 */
2126 preempt_enable();
2127
2128 if (!this_cpu_read(ida_bitmap)) {
2129 struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
2130 if (!bitmap)
2131 return 0;
2132 bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
2133 kfree(bitmap);
2134 }
2135
2136 return 1;
2137}
2138EXPORT_SYMBOL(ida_pre_get);
2139
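As the comment in ida_pre_get() notes, the IDA has no preload section; callers loop back to ida_pre_get() whenever ida_get_new() returns -EAGAIN. A sketch of that retry loop (my_ida is an illustrative name):

        int id, err;

        do {
                if (!ida_pre_get(&my_ida, GFP_KERNEL))
                        return -ENOMEM;
                err = ida_get_new(&my_ida, &id);
        } while (err == -EAGAIN);
        if (err)
                return err;     /* -ENOSPC */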
2140void __rcu **idr_get_free(struct radix_tree_root *root,
2141 struct radix_tree_iter *iter, gfp_t gfp, int end)
2142{
2143 struct radix_tree_node *node = NULL, *child;
2144 void __rcu **slot = (void __rcu **)&root->rnode;
2145 unsigned long maxindex, start = iter->next_index;
2146 unsigned long max = end > 0 ? end - 1 : INT_MAX;
2147 unsigned int shift, offset = 0;
2148
2149 grow:
2150 shift = radix_tree_load_root(root, &child, &maxindex);
2151 if (!radix_tree_tagged(root, IDR_FREE))
2152 start = max(start, maxindex + 1);
2153 if (start > max)
2154 return ERR_PTR(-ENOSPC);
2155
2156 if (start > maxindex) {
2157 int error = radix_tree_extend(root, gfp, start, shift);
2158 if (error < 0)
2159 return ERR_PTR(error);
2160 shift = error;
2161 child = rcu_dereference_raw(root->rnode);
2162 }
2163
2164 while (shift) {
2165 shift -= RADIX_TREE_MAP_SHIFT;
2166 if (child == NULL) {
2167 /* Have to add a child node. */
2168 child = radix_tree_node_alloc(gfp, node, root, shift,
2169 offset, 0, 0);
2170 if (!child)
2171 return ERR_PTR(-ENOMEM);
2172 all_tag_set(child, IDR_FREE);
2173 rcu_assign_pointer(*slot, node_to_entry(child));
2174 if (node)
2175 node->count++;
2176 } else if (!radix_tree_is_internal_node(child))
2177 break;
2178
2179 node = entry_to_node(child);
2180 offset = radix_tree_descend(node, &child, start);
2181 if (!tag_get(node, IDR_FREE, offset)) {
2182 offset = radix_tree_find_next_bit(node, IDR_FREE,
2183 offset + 1);
2184 start = next_index(start, node, offset);
2185 if (start > max)
2186 return ERR_PTR(-ENOSPC);
2187 while (offset == RADIX_TREE_MAP_SIZE) {
2188 offset = node->offset + 1;
2189 node = node->parent;
2190 if (!node)
2191 goto grow;
2192 shift = node->shift;
2193 }
2194 child = rcu_dereference_raw(node->slots[offset]);
2195 }
2196 slot = &node->slots[offset];
2197 }
2198
2199 iter->index = start;
2200 if (node)
2201 iter->next_index = 1 + min(max, (start | node_maxindex(node)));
2202 else
2203 iter->next_index = 1;
2204 iter->node = node;
2205 __set_iter_shift(iter, shift);
2206 set_iter_tags(iter, node, offset, IDR_FREE);
2207
2208 return slot;
2209}
2210
2211/**
2212 * idr_destroy - release all internal memory from an IDR
2213 * @idr: idr handle
2214 *
2215 * After this function is called, the IDR is empty, and may be reused or
2216 * the data structure containing it may be freed.
2217 *
2218 * A typical clean-up sequence for objects stored in an idr tree will use
2219 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
2220 * free the memory used to keep track of those objects.
2221 */
2222void idr_destroy(struct idr *idr)
2223{
2224 struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);
2225 if (radix_tree_is_internal_node(node))
2226 radix_tree_free_nodes(node);
2227 idr->idr_rt.rnode = NULL;
2228 root_tag_set(&idr->idr_rt, IDR_FREE);
2229}
2230EXPORT_SYMBOL(idr_destroy);
2231
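The clean-up sequence described above, which idr_checks() below exercises with item_idr_free(), looks roughly like this (free_one() and my_idr are illustrative names):

        static int free_one(int id, void *p, void *data)
        {
                kfree(p);
                return 0;
        }

        /* at tear-down time: */
        idr_for_each(&my_idr, free_one, NULL);
        idr_destroy(&my_idr);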
1931static void 2232static void
1932radix_tree_node_ctor(void *arg) 2233radix_tree_node_ctor(void *arg)
1933{ 2234{
@@ -1971,10 +2272,12 @@ static int radix_tree_cpu_dead(unsigned int cpu)
1971 rtp = &per_cpu(radix_tree_preloads, cpu); 2272 rtp = &per_cpu(radix_tree_preloads, cpu);
1972 while (rtp->nr) { 2273 while (rtp->nr) {
1973 node = rtp->nodes; 2274 node = rtp->nodes;
1974 rtp->nodes = node->private_data; 2275 rtp->nodes = node->parent;
1975 kmem_cache_free(radix_tree_node_cachep, node); 2276 kmem_cache_free(radix_tree_node_cachep, node);
1976 rtp->nr--; 2277 rtp->nr--;
1977 } 2278 }
2279 kfree(per_cpu(ida_bitmap, cpu));
2280 per_cpu(ida_bitmap, cpu) = NULL;
1978 return 0; 2281 return 0;
1979} 2282}
1980 2283
diff --git a/mm/workingset.c b/mm/workingset.c
index 79ed5364375d..ac839fca0e76 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -355,10 +355,8 @@ void workingset_update_node(struct radix_tree_node *node, void *private)
355 * as node->private_list is protected by &mapping->tree_lock. 355 * as node->private_list is protected by &mapping->tree_lock.
356 */ 356 */
357 if (node->count && node->count == node->exceptional) { 357 if (node->count && node->count == node->exceptional) {
358 if (list_empty(&node->private_list)) { 358 if (list_empty(&node->private_list))
359 node->private_data = mapping;
360 list_lru_add(&shadow_nodes, &node->private_list); 359 list_lru_add(&shadow_nodes, &node->private_list);
361 }
362 } else { 360 } else {
363 if (!list_empty(&node->private_list)) 361 if (!list_empty(&node->private_list))
364 list_lru_del(&shadow_nodes, &node->private_list); 362 list_lru_del(&shadow_nodes, &node->private_list);
@@ -436,7 +434,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
436 */ 434 */
437 435
438 node = container_of(item, struct radix_tree_node, private_list); 436 node = container_of(item, struct radix_tree_node, private_list);
439 mapping = node->private_data; 437 mapping = container_of(node->root, struct address_space, page_tree);
440 438
441 /* Coming from the list, invert the lock order */ 439 /* Coming from the list, invert the lock order */
442 if (!spin_trylock(&mapping->tree_lock)) { 440 if (!spin_trylock(&mapping->tree_lock)) {
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index a3af6e1bfd98..0dd7c351002d 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -462,9 +462,7 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
462 unsigned long flags; 462 unsigned long flags;
463 463
464 spin_lock_irqsave(&local->ack_status_lock, flags); 464 spin_lock_irqsave(&local->ack_status_lock, flags);
465 skb = idr_find(&local->ack_status_frames, info->ack_frame_id); 465 skb = idr_remove(&local->ack_status_frames, info->ack_frame_id);
466 if (skb)
467 idr_remove(&local->ack_status_frames, info->ack_frame_id);
468 spin_unlock_irqrestore(&local->ack_status_lock, flags); 466 spin_unlock_irqrestore(&local->ack_status_lock, flags);
469 467
470 if (!skb) 468 if (!skb)
diff --git a/tools/include/asm-generic/bitops/atomic.h b/tools/include/asm-generic/bitops/atomic.h
index 18663f59d72f..68b8c1516c5a 100644
--- a/tools/include/asm-generic/bitops/atomic.h
+++ b/tools/include/asm-generic/bitops/atomic.h
@@ -20,4 +20,7 @@ static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
20 (((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0; 20 (((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0;
21} 21}
22 22
23#define __set_bit(nr, addr) set_bit(nr, addr)
24#define __clear_bit(nr, addr) clear_bit(nr, addr)
25
23#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */ 26#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */
diff --git a/tools/include/asm/bug.h b/tools/include/asm/bug.h
index beda1a884b50..4790f047a89c 100644
--- a/tools/include/asm/bug.h
+++ b/tools/include/asm/bug.h
@@ -12,6 +12,14 @@
12 unlikely(__ret_warn_on); \ 12 unlikely(__ret_warn_on); \
13}) 13})
14 14
15#define WARN_ON(condition) ({ \
16 int __ret_warn_on = !!(condition); \
17 if (unlikely(__ret_warn_on)) \
18 __WARN_printf("assertion failed at %s:%d\n", \
19 __FILE__, __LINE__); \
20 unlikely(__ret_warn_on); \
21})
22
15#define WARN_ON_ONCE(condition) ({ \ 23#define WARN_ON_ONCE(condition) ({ \
16 static int __warned; \ 24 static int __warned; \
17 int __ret_warn_once = !!(condition); \ 25 int __ret_warn_once = !!(condition); \
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index eef41d500e9e..e8b9f518e36b 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -4,6 +4,7 @@
4#include <string.h> 4#include <string.h>
5#include <linux/bitops.h> 5#include <linux/bitops.h>
6#include <stdlib.h> 6#include <stdlib.h>
7#include <linux/kernel.h>
7 8
8#define DECLARE_BITMAP(name,bits) \ 9#define DECLARE_BITMAP(name,bits) \
9 unsigned long name[BITS_TO_LONGS(bits)] 10 unsigned long name[BITS_TO_LONGS(bits)]
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index fc446343ff41..1aecad369af5 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -2,7 +2,6 @@
2#define _TOOLS_LINUX_BITOPS_H_ 2#define _TOOLS_LINUX_BITOPS_H_
3 3
4#include <asm/types.h> 4#include <asm/types.h>
5#include <linux/kernel.h>
6#include <linux/compiler.h> 5#include <linux/compiler.h>
7 6
8#ifndef __WORDSIZE 7#ifndef __WORDSIZE
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 6326ede9aece..8de163b17c0d 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -25,6 +25,8 @@
25#endif 25#endif
26 26
27#define __user 27#define __user
28#define __rcu
29#define __read_mostly
28 30
29#ifndef __attribute_const__ 31#ifndef __attribute_const__
30# define __attribute_const__ 32# define __attribute_const__
@@ -54,6 +56,8 @@
54# define unlikely(x) __builtin_expect(!!(x), 0) 56# define unlikely(x) __builtin_expect(!!(x), 0)
55#endif 57#endif
56 58
59#define uninitialized_var(x) x = *(&(x))
60
57#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) 61#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58 62
59#include <linux/types.h> 63#include <linux/types.h>
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
new file mode 100644
index 000000000000..58397dcb19d6
--- /dev/null
+++ b/tools/include/linux/spinlock.h
@@ -0,0 +1,5 @@
1#define spinlock_t pthread_mutex_t
2#define DEFINE_SPINLOCK(x) pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
3
4#define spin_lock_irqsave(x, f) (void)f, pthread_mutex_lock(x)
5#define spin_unlock_irqrestore(x, f) (void)f, pthread_mutex_unlock(x)
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index 11d888ca6a92..d4706c0ffceb 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -1,2 +1,6 @@
1generated/map-shift.h
2idr.c
3idr-test
1main 4main
5multiorder
2radix-tree.c 6radix-tree.c
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 3635e4d3eca7..f11315bedefc 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -1,29 +1,47 @@
1 1
2CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE 2CFLAGS += -I. -I../../include -g -O2 -Wall -D_LGPL_SOURCE -fsanitize=address
3LDFLAGS += -lpthread -lurcu 3LDFLAGS += -lpthread -lurcu
4TARGETS = main 4TARGETS = main idr-test multiorder
5OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \ 5CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
6 regression1.o regression2.o regression3.o multiorder.o \ 6OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
7 iteration_check.o benchmark.o 7 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
8 8
9ifdef BENCHMARK 9ifndef SHIFT
10 CFLAGS += -DBENCHMARK=1 10 SHIFT=3
11endif 11endif
12 12
13targets: $(TARGETS) 13targets: mapshift $(TARGETS)
14 14
15main: $(OFILES) 15main: $(OFILES)
16 $(CC) $(CFLAGS) $(LDFLAGS) $(OFILES) -o main 16 $(CC) $(CFLAGS) $(LDFLAGS) $^ -o main
17
18idr-test: idr-test.o $(CORE_OFILES)
19 $(CC) $(CFLAGS) $(LDFLAGS) $^ -o idr-test
20
21multiorder: multiorder.o $(CORE_OFILES)
22 $(CC) $(CFLAGS) $(LDFLAGS) $^ -o multiorder
17 23
18clean: 24clean:
19 $(RM) -f $(TARGETS) *.o radix-tree.c 25 $(RM) $(TARGETS) *.o radix-tree.c idr.c generated/map-shift.h
20 26
21find_next_bit.o: ../../lib/find_bit.c 27vpath %.c ../../lib
22 $(CC) $(CFLAGS) -c -o $@ $<
23 28
24$(OFILES): *.h */*.h \ 29$(OFILES): *.h */*.h generated/map-shift.h \
25 ../../include/linux/*.h \ 30 ../../include/linux/*.h \
26 ../../../include/linux/radix-tree.h 31 ../../include/asm/*.h \
32 ../../../include/linux/radix-tree.h \
33 ../../../include/linux/idr.h
27 34
28radix-tree.c: ../../../lib/radix-tree.c 35radix-tree.c: ../../../lib/radix-tree.c
29 sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@ 36 sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
37
38idr.c: ../../../lib/idr.c
39 sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
40
41.PHONY: mapshift
42
43mapshift:
44 @if ! grep -qw $(SHIFT) generated/map-shift.h; then \
45 echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \
46 generated/map-shift.h; \
47 fi
diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c
index 215ca86c7605..9b09ddfe462f 100644
--- a/tools/testing/radix-tree/benchmark.c
+++ b/tools/testing/radix-tree/benchmark.c
@@ -71,7 +71,7 @@ static void benchmark_size(unsigned long size, unsigned long step, int order)
71 tagged = benchmark_iter(&tree, true); 71 tagged = benchmark_iter(&tree, true);
72 normal = benchmark_iter(&tree, false); 72 normal = benchmark_iter(&tree, false);
73 73
74 printf("Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n", 74 printv(2, "Size %ld, step %6ld, order %d tagged %10lld ns, normal %10lld ns\n",
75 size, step, order, tagged, normal); 75 size, step, order, tagged, normal);
76 76
77 item_kill_tree(&tree); 77 item_kill_tree(&tree);
@@ -85,8 +85,8 @@ void benchmark(void)
85 128, 256, 512, 12345, 0}; 85 128, 256, 512, 12345, 0};
86 int c, s; 86 int c, s;
87 87
88 printf("starting benchmarks\n"); 88 printv(1, "starting benchmarks\n");
89 printf("RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT); 89 printv(1, "RADIX_TREE_MAP_SHIFT = %d\n", RADIX_TREE_MAP_SHIFT);
90 90
91 for (c = 0; size[c]; c++) 91 for (c = 0; size[c]; c++)
92 for (s = 0; step[s]; s++) 92 for (s = 0; step[s]; s++)
diff --git a/tools/testing/radix-tree/generated/autoconf.h b/tools/testing/radix-tree/generated/autoconf.h
index ad18cf5a2a3a..cf88dc5b8832 100644
--- a/tools/testing/radix-tree/generated/autoconf.h
+++ b/tools/testing/radix-tree/generated/autoconf.h
@@ -1,3 +1 @@
1#define CONFIG_RADIX_TREE_MULTIORDER 1 #define CONFIG_RADIX_TREE_MULTIORDER 1
2#define CONFIG_SHMEM 1
3#define CONFIG_SWAP 1
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
new file mode 100644
index 000000000000..a26098c6123d
--- /dev/null
+++ b/tools/testing/radix-tree/idr-test.c
@@ -0,0 +1,444 @@
1/*
2 * idr-test.c: Test the IDR API
3 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/bitmap.h>
15#include <linux/idr.h>
16#include <linux/slab.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19
20#include "test.h"
21
22#define DUMMY_PTR ((void *)0x12)
23
24int item_idr_free(int id, void *p, void *data)
25{
26 struct item *item = p;
27 assert(item->index == id);
28 free(p);
29
30 return 0;
31}
32
33void item_idr_remove(struct idr *idr, int id)
34{
35 struct item *item = idr_find(idr, id);
36 assert(item->index == id);
37 idr_remove(idr, id);
38 free(item);
39}
40
41void idr_alloc_test(void)
42{
43 unsigned long i;
44 DEFINE_IDR(idr);
45
46 assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
47 assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
48 idr_remove(&idr, 0x3ffd);
49 idr_remove(&idr, 0);
50
51 for (i = 0x3ffe; i < 0x4003; i++) {
52 int id;
53 struct item *item;
54
55 if (i < 0x4000)
56 item = item_create(i, 0);
57 else
58 item = item_create(i - 0x3fff, 0);
59
60 id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
61 assert(id == item->index);
62 }
63
64 idr_for_each(&idr, item_idr_free, &idr);
65 idr_destroy(&idr);
66}
67
68void idr_replace_test(void)
69{
70 DEFINE_IDR(idr);
71
72 idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
73 idr_replace(&idr, &idr, 10);
74
75 idr_destroy(&idr);
76}
77
78/*
79 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
80 * the IDR. Some interfaces, like idr_find() do not distinguish between
81 * "present, value is NULL" and "not present", but that's exactly what some
82 * users want.
83 */
84void idr_null_test(void)
85{
86 int i;
87 DEFINE_IDR(idr);
88
89 assert(idr_is_empty(&idr));
90
91 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
92 assert(!idr_is_empty(&idr));
93 idr_remove(&idr, 0);
94 assert(idr_is_empty(&idr));
95
96 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
97 assert(!idr_is_empty(&idr));
98 idr_destroy(&idr);
99 assert(idr_is_empty(&idr));
100
101 for (i = 0; i < 10; i++) {
102 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
103 }
104
105 assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
106 assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
107 assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
108 assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
109 idr_remove(&idr, 5);
110 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
111 idr_remove(&idr, 5);
112
113 for (i = 0; i < 9; i++) {
114 idr_remove(&idr, i);
115 assert(!idr_is_empty(&idr));
116 }
117 idr_remove(&idr, 8);
118 assert(!idr_is_empty(&idr));
119 idr_remove(&idr, 9);
120 assert(idr_is_empty(&idr));
121
122 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
123 assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
124 assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
125 assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);
126
127 idr_destroy(&idr);
128 assert(idr_is_empty(&idr));
129
130 for (i = 1; i < 10; i++) {
131 assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
132 }
133
134 idr_destroy(&idr);
135 assert(idr_is_empty(&idr));
136}
137
138void idr_nowait_test(void)
139{
140 unsigned int i;
141 DEFINE_IDR(idr);
142
143 idr_preload(GFP_KERNEL);
144
145 for (i = 0; i < 3; i++) {
146 struct item *item = item_create(i, 0);
147 assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
148 }
149
150 idr_preload_end();
151
152 idr_for_each(&idr, item_idr_free, &idr);
153 idr_destroy(&idr);
154}
155
156void idr_checks(void)
157{
158 unsigned long i;
159 DEFINE_IDR(idr);
160
161 for (i = 0; i < 10000; i++) {
162 struct item *item = item_create(i, 0);
163 assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
164 }
165
166 assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);
167
168 for (i = 0; i < 5000; i++)
169 item_idr_remove(&idr, i);
170
171 idr_remove(&idr, 3);
172
173 idr_for_each(&idr, item_idr_free, &idr);
174 idr_destroy(&idr);
175
176 assert(idr_is_empty(&idr));
177
178 idr_remove(&idr, 3);
179 idr_remove(&idr, 0);
180
181 for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
182 struct item *item = item_create(i, 0);
183 assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
184 }
185 assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
186
187 idr_for_each(&idr, item_idr_free, &idr);
188 idr_destroy(&idr);
189 idr_destroy(&idr);
190
191 assert(idr_is_empty(&idr));
192
193 for (i = 1; i < 10000; i++) {
194 struct item *item = item_create(i, 0);
195 assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
196 }
197
198 idr_for_each(&idr, item_idr_free, &idr);
199 idr_destroy(&idr);
200
201 idr_replace_test();
202 idr_alloc_test();
203 idr_null_test();
204 idr_nowait_test();
205}
206
207/*
208 * Check that we get the correct error when we run out of memory doing
209 * allocations. To ensure we run out of memory, just "forget" to preload.
210 * The first test is for not having a bitmap available, and the second test
211 * is for not being able to allocate a level of the radix tree.
212 */
213void ida_check_nomem(void)
214{
215 DEFINE_IDA(ida);
216 int id, err;
217
218 err = ida_get_new_above(&ida, 256, &id);
219 assert(err == -EAGAIN);
220 err = ida_get_new_above(&ida, 1UL << 30, &id);
221 assert(err == -EAGAIN);
222}
223
224/*
225 * Check what happens when we fill a leaf and then delete it. This may
226 * discover mishandling of IDR_FREE.
227 */
228void ida_check_leaf(void)
229{
230 DEFINE_IDA(ida);
231 int id;
232 unsigned long i;
233
234 for (i = 0; i < IDA_BITMAP_BITS; i++) {
235 assert(ida_pre_get(&ida, GFP_KERNEL));
236 assert(!ida_get_new(&ida, &id));
237 assert(id == i);
238 }
239
240 ida_destroy(&ida);
241 assert(ida_is_empty(&ida));
242
243 assert(ida_pre_get(&ida, GFP_KERNEL));
244 assert(!ida_get_new(&ida, &id));
245 assert(id == 0);
246 ida_destroy(&ida);
247 assert(ida_is_empty(&ida));
248}
249
250/*
251 * Check handling of conversions between exceptional entries and full bitmaps.
252 */
253void ida_check_conv(void)
254{
255 DEFINE_IDA(ida);
256 int id;
257 unsigned long i;
258
259 for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
260 assert(ida_pre_get(&ida, GFP_KERNEL));
261 assert(!ida_get_new_above(&ida, i + 1, &id));
262 assert(id == i + 1);
263 assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id));
264 assert(id == i + BITS_PER_LONG);
265 ida_remove(&ida, i + 1);
266 ida_remove(&ida, i + BITS_PER_LONG);
267 assert(ida_is_empty(&ida));
268 }
269
270 assert(ida_pre_get(&ida, GFP_KERNEL));
271
272 for (i = 0; i < IDA_BITMAP_BITS * 2; i++) {
273 assert(ida_pre_get(&ida, GFP_KERNEL));
274 assert(!ida_get_new(&ida, &id));
275 assert(id == i);
276 }
277
278 for (i = IDA_BITMAP_BITS * 2; i > 0; i--) {
279 ida_remove(&ida, i - 1);
280 }
281 assert(ida_is_empty(&ida));
282
283 for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) {
284 assert(ida_pre_get(&ida, GFP_KERNEL));
285 assert(!ida_get_new(&ida, &id));
286 assert(id == i);
287 }
288
289 for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) {
290 ida_remove(&ida, i - 1);
291 }
292 assert(ida_is_empty(&ida));
293
294 radix_tree_cpu_dead(1);
295 for (i = 0; i < 1000000; i++) {
296 int err = ida_get_new(&ida, &id);
297 if (err == -EAGAIN) {
298 assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2));
299 assert(ida_pre_get(&ida, GFP_KERNEL));
300 err = ida_get_new(&ida, &id);
301 } else {
302 assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2));
303 }
304 assert(!err);
305 assert(id == i);
306 }
307 ida_destroy(&ida);
308}
309
310/*
311 * Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
312 * Allocating up to 2^31-1 should succeed, and then allocating the next one
313 * should fail.
314 */
315void ida_check_max(void)
316{
317 DEFINE_IDA(ida);
318 int id, err;
319 unsigned long i, j;
320
321 for (j = 1; j < 65537; j *= 2) {
322 unsigned long base = (1UL << 31) - j;
323 for (i = 0; i < j; i++) {
324 assert(ida_pre_get(&ida, GFP_KERNEL));
325 assert(!ida_get_new_above(&ida, base, &id));
326 assert(id == base + i);
327 }
328 assert(ida_pre_get(&ida, GFP_KERNEL));
329 err = ida_get_new_above(&ida, base, &id);
330 assert(err == -ENOSPC);
331 ida_destroy(&ida);
332 assert(ida_is_empty(&ida));
333 rcu_barrier();
334 }
335}
336
337void ida_check_random(void)
338{
339 DEFINE_IDA(ida);
340 DECLARE_BITMAP(bitmap, 2048);
341 int id;
342 unsigned int i;
343 time_t s = time(NULL);
344
345 repeat:
346 memset(bitmap, 0, sizeof(bitmap));
347 for (i = 0; i < 100000; i++) {
348 int i = rand();
349 int bit = i & 2047;
350 if (test_bit(bit, bitmap)) {
351 __clear_bit(bit, bitmap);
352 ida_remove(&ida, bit);
353 } else {
354 __set_bit(bit, bitmap);
355 ida_pre_get(&ida, GFP_KERNEL);
356 assert(!ida_get_new_above(&ida, bit, &id));
357 assert(id == bit);
358 }
359 }
360 ida_destroy(&ida);
361 if (time(NULL) < s + 10)
362 goto repeat;
363}
364
365void ida_checks(void)
366{
367 DEFINE_IDA(ida);
368 int id;
369 unsigned long i;
370
371 radix_tree_cpu_dead(1);
372 ida_check_nomem();
373
374 for (i = 0; i < 10000; i++) {
375 assert(ida_pre_get(&ida, GFP_KERNEL));
376 assert(!ida_get_new(&ida, &id));
377 assert(id == i);
378 }
379
380 ida_remove(&ida, 20);
381 ida_remove(&ida, 21);
382 for (i = 0; i < 3; i++) {
383 assert(ida_pre_get(&ida, GFP_KERNEL));
384 assert(!ida_get_new(&ida, &id));
385 if (i == 2)
386 assert(id == 10000);
387 }
388
389 for (i = 0; i < 5000; i++)
390 ida_remove(&ida, i);
391
392 assert(ida_pre_get(&ida, GFP_KERNEL));
393 assert(!ida_get_new_above(&ida, 5000, &id));
394 assert(id == 10001);
395
396 ida_destroy(&ida);
397
398 assert(ida_is_empty(&ida));
399
400 assert(ida_pre_get(&ida, GFP_KERNEL));
401 assert(!ida_get_new_above(&ida, 1, &id));
402 assert(id == 1);
403
404 ida_remove(&ida, id);
405 assert(ida_is_empty(&ida));
406 ida_destroy(&ida);
407 assert(ida_is_empty(&ida));
408
409 assert(ida_pre_get(&ida, GFP_KERNEL));
410 assert(!ida_get_new_above(&ida, 1, &id));
411 ida_destroy(&ida);
412 assert(ida_is_empty(&ida));
413
414 assert(ida_pre_get(&ida, GFP_KERNEL));
415 assert(!ida_get_new_above(&ida, 1, &id));
416 assert(id == 1);
417 assert(ida_pre_get(&ida, GFP_KERNEL));
418 assert(!ida_get_new_above(&ida, 1025, &id));
419 assert(id == 1025);
420 assert(ida_pre_get(&ida, GFP_KERNEL));
421 assert(!ida_get_new_above(&ida, 10000, &id));
422 assert(id == 10000);
423 ida_remove(&ida, 1025);
424 ida_destroy(&ida);
425 assert(ida_is_empty(&ida));
426
427 ida_check_leaf();
428 ida_check_max();
429 ida_check_conv();
430 ida_check_random();
431
432 radix_tree_cpu_dead(1);
433}
434
435int __weak main(void)
436{
437 radix_tree_init();
438 idr_checks();
439 ida_checks();
440 rcu_barrier();
441 if (nr_allocated)
442 printf("nr_allocated = %d\n", nr_allocated);
443 return 0;
444}
diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c
index 7572b7ed930e..a92bab513701 100644
--- a/tools/testing/radix-tree/iteration_check.c
+++ b/tools/testing/radix-tree/iteration_check.c
@@ -177,7 +177,7 @@ void iteration_test(unsigned order, unsigned test_duration)
177{ 177{
178 int i; 178 int i;
179 179
180 printf("Running %siteration tests for %d seconds\n", 180 printv(1, "Running %siteration tests for %d seconds\n",
181 order > 0 ? "multiorder " : "", test_duration); 181 order > 0 ? "multiorder " : "", test_duration);
182 182
183 max_order = order; 183 max_order = order;
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index d31ea7c9abec..cf48c8473f48 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -5,7 +5,7 @@
5#include <unistd.h> 5#include <unistd.h>
6#include <assert.h> 6#include <assert.h>
7 7
8#include <linux/mempool.h> 8#include <linux/gfp.h>
9#include <linux/poison.h> 9#include <linux/poison.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/radix-tree.h> 11#include <linux/radix-tree.h>
@@ -13,6 +13,8 @@
13 13
14int nr_allocated; 14int nr_allocated;
15int preempt_count; 15int preempt_count;
16int kmalloc_verbose;
17int test_verbose;
16 18
17struct kmem_cache { 19struct kmem_cache {
18 pthread_mutex_t lock; 20 pthread_mutex_t lock;
@@ -22,27 +24,6 @@ struct kmem_cache {
22 void (*ctor)(void *); 24 void (*ctor)(void *);
23}; 25};
24 26
25void *mempool_alloc(mempool_t *pool, int gfp_mask)
26{
27 return pool->alloc(gfp_mask, pool->data);
28}
29
30void mempool_free(void *element, mempool_t *pool)
31{
32 pool->free(element, pool->data);
33}
34
35mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
36 mempool_free_t *free_fn, void *pool_data)
37{
38 mempool_t *ret = malloc(sizeof(*ret));
39
40 ret->alloc = alloc_fn;
41 ret->free = free_fn;
42 ret->data = pool_data;
43 return ret;
44}
45
46void *kmem_cache_alloc(struct kmem_cache *cachep, int flags) 27void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
47{ 28{
48 struct radix_tree_node *node; 29 struct radix_tree_node *node;
@@ -54,9 +35,9 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
54 if (cachep->nr_objs) { 35 if (cachep->nr_objs) {
55 cachep->nr_objs--; 36 cachep->nr_objs--;
56 node = cachep->objs; 37 node = cachep->objs;
57 cachep->objs = node->private_data; 38 cachep->objs = node->parent;
58 pthread_mutex_unlock(&cachep->lock); 39 pthread_mutex_unlock(&cachep->lock);
59 node->private_data = NULL; 40 node->parent = NULL;
60 } else { 41 } else {
61 pthread_mutex_unlock(&cachep->lock); 42 pthread_mutex_unlock(&cachep->lock);
62 node = malloc(cachep->size); 43 node = malloc(cachep->size);
@@ -65,6 +46,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
65 } 46 }
66 47
67 uatomic_inc(&nr_allocated); 48 uatomic_inc(&nr_allocated);
49 if (kmalloc_verbose)
50 printf("Allocating %p from slab\n", node);
68 return node; 51 return node;
69} 52}
70 53
@@ -72,6 +55,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
72{ 55{
73 assert(objp); 56 assert(objp);
74 uatomic_dec(&nr_allocated); 57 uatomic_dec(&nr_allocated);
58 if (kmalloc_verbose)
59 printf("Freeing %p to slab\n", objp);
75 pthread_mutex_lock(&cachep->lock); 60 pthread_mutex_lock(&cachep->lock);
76 if (cachep->nr_objs > 10) { 61 if (cachep->nr_objs > 10) {
77 memset(objp, POISON_FREE, cachep->size); 62 memset(objp, POISON_FREE, cachep->size);
@@ -79,7 +64,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
79 } else { 64 } else {
80 struct radix_tree_node *node = objp; 65 struct radix_tree_node *node = objp;
81 cachep->nr_objs++; 66 cachep->nr_objs++;
82 node->private_data = cachep->objs; 67 node->parent = cachep->objs;
83 cachep->objs = node; 68 cachep->objs = node;
84 } 69 }
85 pthread_mutex_unlock(&cachep->lock); 70 pthread_mutex_unlock(&cachep->lock);
@@ -89,6 +74,8 @@ void *kmalloc(size_t size, gfp_t gfp)
89{ 74{
90 void *ret = malloc(size); 75 void *ret = malloc(size);
91 uatomic_inc(&nr_allocated); 76 uatomic_inc(&nr_allocated);
77 if (kmalloc_verbose)
78 printf("Allocating %p from malloc\n", ret);
92 return ret; 79 return ret;
93} 80}
94 81
@@ -97,6 +84,8 @@ void kfree(void *p)
97 if (!p) 84 if (!p)
98 return; 85 return;
99 uatomic_dec(&nr_allocated); 86 uatomic_dec(&nr_allocated);
87 if (kmalloc_verbose)
88 printf("Freeing %p to malloc\n", p);
100 free(p); 89 free(p);
101} 90}
102 91
diff --git a/tools/testing/radix-tree/linux/bitops.h b/tools/testing/radix-tree/linux/bitops.h
deleted file mode 100644
index a13e9bc76eec..000000000000
--- a/tools/testing/radix-tree/linux/bitops.h
+++ /dev/null
@@ -1,160 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
2#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
3
4#include <linux/types.h>
5#include <linux/bitops/find.h>
6#include <linux/bitops/hweight.h>
7#include <linux/kernel.h>
8
9#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
10#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
11#define BITS_PER_BYTE 8
12#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
13
14/**
15 * __set_bit - Set a bit in memory
16 * @nr: the bit to set
17 * @addr: the address to start counting from
18 *
19 * Unlike set_bit(), this function is non-atomic and may be reordered.
20 * If it's called on the same region of memory simultaneously, the effect
21 * may be that only one operation succeeds.
22 */
23static inline void __set_bit(int nr, volatile unsigned long *addr)
24{
25 unsigned long mask = BIT_MASK(nr);
26 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
27
28 *p |= mask;
29}
30
31static inline void __clear_bit(int nr, volatile unsigned long *addr)
32{
33 unsigned long mask = BIT_MASK(nr);
34 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
35
36 *p &= ~mask;
37}
38
39/**
40 * __change_bit - Toggle a bit in memory
41 * @nr: the bit to change
42 * @addr: the address to start counting from
43 *
44 * Unlike change_bit(), this function is non-atomic and may be reordered.
45 * If it's called on the same region of memory simultaneously, the effect
46 * may be that only one operation succeeds.
47 */
48static inline void __change_bit(int nr, volatile unsigned long *addr)
49{
50 unsigned long mask = BIT_MASK(nr);
51 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
52
53 *p ^= mask;
54}
55
56/**
57 * __test_and_set_bit - Set a bit and return its old value
58 * @nr: Bit to set
59 * @addr: Address to count from
60 *
61 * This operation is non-atomic and can be reordered.
62 * If two examples of this operation race, one can appear to succeed
63 * but actually fail. You must protect multiple accesses with a lock.
64 */
65static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
66{
67 unsigned long mask = BIT_MASK(nr);
68 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
69 unsigned long old = *p;
70
71 *p = old | mask;
72 return (old & mask) != 0;
73}
74
75/**
76 * __test_and_clear_bit - Clear a bit and return its old value
77 * @nr: Bit to clear
78 * @addr: Address to count from
79 *
80 * This operation is non-atomic and can be reordered.
81 * If two examples of this operation race, one can appear to succeed
82 * but actually fail. You must protect multiple accesses with a lock.
83 */
84static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
85{
86 unsigned long mask = BIT_MASK(nr);
87 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
88 unsigned long old = *p;
89
90 *p = old & ~mask;
91 return (old & mask) != 0;
92}
93
94/* WARNING: non atomic and it can be reordered! */
95static inline int __test_and_change_bit(int nr,
96 volatile unsigned long *addr)
97{
98 unsigned long mask = BIT_MASK(nr);
99 unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
100 unsigned long old = *p;
101
102 *p = old ^ mask;
103 return (old & mask) != 0;
104}
105
106/**
107 * test_bit - Determine whether a bit is set
108 * @nr: bit number to test
109 * @addr: Address to start counting from
110 */
111static inline int test_bit(int nr, const volatile unsigned long *addr)
112{
113 return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
114}
115
116/**
117 * __ffs - find first bit in word.
118 * @word: The word to search
119 *
120 * Undefined if no bit exists, so code should check against 0 first.
121 */
122static inline unsigned long __ffs(unsigned long word)
123{
124 int num = 0;
125
126 if ((word & 0xffffffff) == 0) {
127 num += 32;
128 word >>= 32;
129 }
130 if ((word & 0xffff) == 0) {
131 num += 16;
132 word >>= 16;
133 }
134 if ((word & 0xff) == 0) {
135 num += 8;
136 word >>= 8;
137 }
138 if ((word & 0xf) == 0) {
139 num += 4;
140 word >>= 4;
141 }
142 if ((word & 0x3) == 0) {
143 num += 2;
144 word >>= 2;
145 }
146 if ((word & 0x1) == 0)
147 num += 1;
148 return num;
149}
150
151unsigned long find_next_bit(const unsigned long *addr,
152 unsigned long size,
153 unsigned long offset);
154
155static inline unsigned long hweight_long(unsigned long w)
156{
157 return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
158}
159
160#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/__ffs.h b/tools/testing/radix-tree/linux/bitops/__ffs.h
deleted file mode 100644
index 9a3274aecf83..000000000000
--- a/tools/testing/radix-tree/linux/bitops/__ffs.h
+++ /dev/null
@@ -1,43 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS___FFS_H_
2#define _ASM_GENERIC_BITOPS___FFS_H_
3
4#include <asm/types.h>
5
6/**
7 * __ffs - find first bit in word.
8 * @word: The word to search
9 *
10 * Undefined if no bit exists, so code should check against 0 first.
11 */
12static inline unsigned long __ffs(unsigned long word)
13{
14 int num = 0;
15
16#if BITS_PER_LONG == 64
17 if ((word & 0xffffffff) == 0) {
18 num += 32;
19 word >>= 32;
20 }
21#endif
22 if ((word & 0xffff) == 0) {
23 num += 16;
24 word >>= 16;
25 }
26 if ((word & 0xff) == 0) {
27 num += 8;
28 word >>= 8;
29 }
30 if ((word & 0xf) == 0) {
31 num += 4;
32 word >>= 4;
33 }
34 if ((word & 0x3) == 0) {
35 num += 2;
36 word >>= 2;
37 }
38 if ((word & 0x1) == 0)
39 num += 1;
40 return num;
41}
42
43#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/ffs.h b/tools/testing/radix-tree/linux/bitops/ffs.h
deleted file mode 100644
index fbbb43af7dc0..000000000000
--- a/tools/testing/radix-tree/linux/bitops/ffs.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS_FFS_H_
2#define _ASM_GENERIC_BITOPS_FFS_H_
3
4/**
5 * ffs - find first bit set
6 * @x: the word to search
7 *
8 * This is defined the same way as
9 * the libc and compiler builtin ffs routines, therefore
10 * differs in spirit from the above ffz (man ffs).
11 */
12static inline int ffs(int x)
13{
14 int r = 1;
15
16 if (!x)
17 return 0;
18 if (!(x & 0xffff)) {
19 x >>= 16;
20 r += 16;
21 }
22 if (!(x & 0xff)) {
23 x >>= 8;
24 r += 8;
25 }
26 if (!(x & 0xf)) {
27 x >>= 4;
28 r += 4;
29 }
30 if (!(x & 3)) {
31 x >>= 2;
32 r += 2;
33 }
34 if (!(x & 1)) {
35 x >>= 1;
36 r += 1;
37 }
38 return r;
39}
40
41#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/ffz.h b/tools/testing/radix-tree/linux/bitops/ffz.h
deleted file mode 100644
index 6744bd4cdf46..000000000000
--- a/tools/testing/radix-tree/linux/bitops/ffz.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
2#define _ASM_GENERIC_BITOPS_FFZ_H_
3
4/*
5 * ffz - find first zero in word.
6 * @word: The word to search
7 *
8 * Undefined if no zero exists, so code should check against ~0UL first.
9 */
10#define ffz(x) __ffs(~(x))
11
12#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/find.h b/tools/testing/radix-tree/linux/bitops/find.h
deleted file mode 100644
index 72a51e5a12ef..000000000000
--- a/tools/testing/radix-tree/linux/bitops/find.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS_FIND_H_
2#define _ASM_GENERIC_BITOPS_FIND_H_
3
4extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
5 size, unsigned long offset);
6
7extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
8 long size, unsigned long offset);
9
10#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
11#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
12
13#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/fls.h b/tools/testing/radix-tree/linux/bitops/fls.h
deleted file mode 100644
index 850859bc5069..000000000000
--- a/tools/testing/radix-tree/linux/bitops/fls.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS_FLS_H_
2#define _ASM_GENERIC_BITOPS_FLS_H_
3
4/**
5 * fls - find last (most-significant) bit set
6 * @x: the word to search
7 *
8 * This is defined the same way as ffs.
9 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
10 */
11
12static inline int fls(int x)
13{
14 int r = 32;
15
16 if (!x)
17 return 0;
18 if (!(x & 0xffff0000u)) {
19 x <<= 16;
20 r -= 16;
21 }
22 if (!(x & 0xff000000u)) {
23 x <<= 8;
24 r -= 8;
25 }
26 if (!(x & 0xf0000000u)) {
27 x <<= 4;
28 r -= 4;
29 }
30 if (!(x & 0xc0000000u)) {
31 x <<= 2;
32 r -= 2;
33 }
34 if (!(x & 0x80000000u)) {
35 x <<= 1;
36 r -= 1;
37 }
38 return r;
39}
40
41#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/fls64.h b/tools/testing/radix-tree/linux/bitops/fls64.h
deleted file mode 100644
index 1b6b17ce2428..000000000000
--- a/tools/testing/radix-tree/linux/bitops/fls64.h
+++ /dev/null
@@ -1,14 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
2#define _ASM_GENERIC_BITOPS_FLS64_H_
3
4#include <asm/types.h>
5
6static inline int fls64(__u64 x)
7{
8 __u32 h = x >> 32;
9 if (h)
10 return fls(h) + 32;
11 return fls(x);
12}
13
14#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/hweight.h b/tools/testing/radix-tree/linux/bitops/hweight.h
deleted file mode 100644
index fbbc383771da..000000000000
--- a/tools/testing/radix-tree/linux/bitops/hweight.h
+++ /dev/null
@@ -1,11 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
2#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
3
4#include <asm/types.h>
5
6extern unsigned int hweight32(unsigned int w);
7extern unsigned int hweight16(unsigned int w);
8extern unsigned int hweight8(unsigned int w);
9extern unsigned long hweight64(__u64 w);
10
11#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/le.h b/tools/testing/radix-tree/linux/bitops/le.h
deleted file mode 100644
index b9c7e5d2d2ad..000000000000
--- a/tools/testing/radix-tree/linux/bitops/le.h
+++ /dev/null
@@ -1,53 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS_LE_H_
2#define _ASM_GENERIC_BITOPS_LE_H_
3
4#include <asm/types.h>
5#include <asm/byteorder.h>
6
7#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
8#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
9
10#if defined(__LITTLE_ENDIAN)
11
12#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
13#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
14#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
15
16#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
17#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
18
19#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
20#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
21
22#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
23
24#elif defined(__BIG_ENDIAN)
25
26#define generic_test_le_bit(nr, addr) \
27 test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
28#define generic___set_le_bit(nr, addr) \
29 __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
30#define generic___clear_le_bit(nr, addr) \
31 __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
32
33#define generic_test_and_set_le_bit(nr, addr) \
34 test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
35#define generic_test_and_clear_le_bit(nr, addr) \
36 test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
37
38#define generic___test_and_set_le_bit(nr, addr) \
39 __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
40#define generic___test_and_clear_le_bit(nr, addr) \
41 __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
42
43extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
44 unsigned long size, unsigned long offset);
45
46#else
47#error "Please fix <asm/byteorder.h>"
48#endif
49
50#define generic_find_first_zero_le_bit(addr, size) \
51 generic_find_next_zero_le_bit((addr), (size), 0)
52
53#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
diff --git a/tools/testing/radix-tree/linux/bitops/non-atomic.h b/tools/testing/radix-tree/linux/bitops/non-atomic.h
deleted file mode 100644
index 6a1bcb9d2c4a..000000000000
--- a/tools/testing/radix-tree/linux/bitops/non-atomic.h
+++ /dev/null
@@ -1,110 +0,0 @@
1#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
2#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
3
4#include <asm/types.h>
5
6#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
7
8/**
9 * __set_bit - Set a bit in memory
10 * @nr: the bit to set
11 * @addr: the address to start counting from
12 *
13 * Unlike set_bit(), this function is non-atomic and may be reordered.
14 * If it's called on the same region of memory simultaneously, the effect
15 * may be that only one operation succeeds.
16 */
17static inline void __set_bit(int nr, volatile unsigned long *addr)
18{
19 unsigned long mask = BIT_MASK(nr);
20 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
21
22 *p |= mask;
23}
24
25static inline void __clear_bit(int nr, volatile unsigned long *addr)
26{
27 unsigned long mask = BIT_MASK(nr);
28 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
29
30 *p &= ~mask;
31}
32
33/**
34 * __change_bit - Toggle a bit in memory
35 * @nr: the bit to change
36 * @addr: the address to start counting from
37 *
38 * Unlike change_bit(), this function is non-atomic and may be reordered.
39 * If it's called on the same region of memory simultaneously, the effect
40 * may be that only one operation succeeds.
41 */
42static inline void __change_bit(int nr, volatile unsigned long *addr)
43{
44 unsigned long mask = BIT_MASK(nr);
45 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
46
47 *p ^= mask;
48}
49
50/**
51 * __test_and_set_bit - Set a bit and return its old value
52 * @nr: Bit to set
53 * @addr: Address to count from
54 *
55 * This operation is non-atomic and can be reordered.
56 * If two examples of this operation race, one can appear to succeed
57 * but actually fail. You must protect multiple accesses with a lock.
58 */
59static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
60{
61 unsigned long mask = BIT_MASK(nr);
62 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
63 unsigned long old = *p;
64
65 *p = old | mask;
66 return (old & mask) != 0;
67}
68
69/**
70 * __test_and_clear_bit - Clear a bit and return its old value
71 * @nr: Bit to clear
72 * @addr: Address to count from
73 *
74 * This operation is non-atomic and can be reordered.
75 * If two examples of this operation race, one can appear to succeed
76 * but actually fail. You must protect multiple accesses with a lock.
77 */
78static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
79{
80 unsigned long mask = BIT_MASK(nr);
81 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
82 unsigned long old = *p;
83
84 *p = old & ~mask;
85 return (old & mask) != 0;
86}
87
88/* WARNING: non atomic and it can be reordered! */
89static inline int __test_and_change_bit(int nr,
90 volatile unsigned long *addr)
91{
92 unsigned long mask = BIT_MASK(nr);
93 unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
94 unsigned long old = *p;
95
96 *p = old ^ mask;
97 return (old & mask) != 0;
98}
99
100/**
101 * test_bit - Determine whether a bit is set
102 * @nr: bit number to test
103 * @addr: Address to start counting from
104 */
105static inline int test_bit(int nr, const volatile unsigned long *addr)
106{
107 return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
108}
109
110#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/tools/testing/radix-tree/linux/export.h b/tools/testing/radix-tree/linux/export.h
deleted file mode 100644
index b6afd131998d..000000000000
--- a/tools/testing/radix-tree/linux/export.h
+++ /dev/null
@@ -1,2 +0,0 @@
1
2#define EXPORT_SYMBOL(sym)
diff --git a/tools/testing/radix-tree/linux/gfp.h b/tools/testing/radix-tree/linux/gfp.h
index 5b09b2ce6c33..39a0dcb9475a 100644
--- a/tools/testing/radix-tree/linux/gfp.h
+++ b/tools/testing/radix-tree/linux/gfp.h
@@ -1,6 +1,8 @@
1#ifndef _GFP_H 1#ifndef _GFP_H
2#define _GFP_H 2#define _GFP_H
3 3
4#include <linux/types.h>
5
4#define __GFP_BITS_SHIFT 26 6#define __GFP_BITS_SHIFT 26
5#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) 7#define __GFP_BITS_MASK ((gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
6 8
@@ -13,10 +15,12 @@
13#define __GFP_DIRECT_RECLAIM 0x400000u 15#define __GFP_DIRECT_RECLAIM 0x400000u
14#define __GFP_KSWAPD_RECLAIM 0x2000000u 16#define __GFP_KSWAPD_RECLAIM 0x2000000u
15 17
16#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM) 18#define __GFP_RECLAIM (__GFP_DIRECT_RECLAIM|__GFP_KSWAPD_RECLAIM)
19
20#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
21#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
22#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
17 23
18#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
19#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
20 24
21static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) 25static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
22{ 26{
diff --git a/tools/testing/radix-tree/linux/idr.h b/tools/testing/radix-tree/linux/idr.h
new file mode 100644
index 000000000000..4e342f2e37cf
--- /dev/null
+++ b/tools/testing/radix-tree/linux/idr.h
@@ -0,0 +1 @@
#include "../../../../include/linux/idr.h"
diff --git a/tools/testing/radix-tree/linux/init.h b/tools/testing/radix-tree/linux/init.h
index 360cabb3c4e7..1bb0afc21309 100644
--- a/tools/testing/radix-tree/linux/init.h
+++ b/tools/testing/radix-tree/linux/init.h
@@ -1 +1 @@
1/* An empty file stub that allows radix-tree.c to compile. */ 1#define __init
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index 9b43b4975d83..b21a77fddcf7 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -1,64 +1,21 @@
1#ifndef _KERNEL_H 1#ifndef _KERNEL_H
2#define _KERNEL_H 2#define _KERNEL_H
3 3
4#include <assert.h> 4#include "../../include/linux/kernel.h"
5#include <string.h> 5#include <string.h>
6#include <stdio.h> 6#include <stdio.h>
7#include <stddef.h>
8#include <limits.h> 7#include <limits.h>
9 8
10#include "../../include/linux/compiler.h" 9#include <linux/compiler.h>
11#include "../../include/linux/err.h" 10#include <linux/err.h>
11#include <linux/bitops.h>
12#include <linux/log2.h>
12#include "../../../include/linux/kconfig.h" 13#include "../../../include/linux/kconfig.h"
13 14
14#ifdef BENCHMARK
15#define RADIX_TREE_MAP_SHIFT 6
16#else
17#define RADIX_TREE_MAP_SHIFT 3
18#endif
19
20#ifndef NULL
21#define NULL 0
22#endif
23
24#define BUG_ON(expr) assert(!(expr))
25#define WARN_ON(expr) assert(!(expr))
26#define __init
27#define __must_check
28#define panic(expr)
29#define printk printf 15#define printk printf
30#define __force
31#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
32#define pr_debug printk 16#define pr_debug printk
33 17#define pr_cont printk
34#define smp_rmb() barrier()
35#define smp_wmb() barrier()
36#define cpu_relax() barrier()
37 18
38#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) 19#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
39 20
40#define container_of(ptr, type, member) ({ \
41 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
42 (type *)( (char *)__mptr - offsetof(type, member) );})
43#define min(a, b) ((a) < (b) ? (a) : (b))
44
45#define cond_resched() sched_yield()
46
47static inline int in_interrupt(void)
48{
49 return 0;
50}
51
52/*
53 * This looks more complex than it should be. But we need to
54 * get the type for the ~ right in round_down (it needs to be
55 * as wide as the result!), and we want to evaluate the macro
56 * arguments just once each.
57 */
58#define __round_mask(x, y) ((__typeof__(x))((y)-1))
59#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
60#define round_down(x, y) ((x) & ~__round_mask(x, y))
61
62#define xchg(ptr, x) uatomic_xchg(ptr, x)
63
64#endif /* _KERNEL_H */ 21#endif /* _KERNEL_H */
diff --git a/tools/testing/radix-tree/linux/mempool.h b/tools/testing/radix-tree/linux/mempool.h
deleted file mode 100644
index 6a2dc55b41d6..000000000000
--- a/tools/testing/radix-tree/linux/mempool.h
+++ /dev/null
@@ -1,16 +0,0 @@
1
2#include <linux/slab.h>
3
4typedef void *(mempool_alloc_t)(int gfp_mask, void *pool_data);
5typedef void (mempool_free_t)(void *element, void *pool_data);
6
7typedef struct {
8 mempool_alloc_t *alloc;
9 mempool_free_t *free;
10 void *data;
11} mempool_t;
12
13void *mempool_alloc(mempool_t *pool, int gfp_mask);
14void mempool_free(void *element, mempool_t *pool);
15mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
16 mempool_free_t *free_fn, void *pool_data);
diff --git a/tools/testing/radix-tree/linux/percpu.h b/tools/testing/radix-tree/linux/percpu.h
index 5837f1d56f17..3ea01a1a88c2 100644
--- a/tools/testing/radix-tree/linux/percpu.h
+++ b/tools/testing/radix-tree/linux/percpu.h
@@ -1,7 +1,10 @@
1 1#define DECLARE_PER_CPU(type, val) extern type val
2#define DEFINE_PER_CPU(type, val) type val 2#define DEFINE_PER_CPU(type, val) type val
3 3
4#define __get_cpu_var(var) var 4#define __get_cpu_var(var) var
5#define this_cpu_ptr(var) var 5#define this_cpu_ptr(var) var
6#define this_cpu_read(var) var
7#define this_cpu_xchg(var, val) uatomic_xchg(&var, val)
8#define this_cpu_cmpxchg(var, old, new) uatomic_cmpxchg(&var, old, new)
6#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) 9#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
7#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) 10#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
diff --git a/tools/testing/radix-tree/linux/preempt.h b/tools/testing/radix-tree/linux/preempt.h
index 65c04c226965..35c5ac81529f 100644
--- a/tools/testing/radix-tree/linux/preempt.h
+++ b/tools/testing/radix-tree/linux/preempt.h
@@ -1,4 +1,14 @@
1#ifndef __LINUX_PREEMPT_H
2#define __LINUX_PREEMPT_H
3
1extern int preempt_count; 4extern int preempt_count;
2 5
3#define preempt_disable() uatomic_inc(&preempt_count) 6#define preempt_disable() uatomic_inc(&preempt_count)
4#define preempt_enable() uatomic_dec(&preempt_count) 7#define preempt_enable() uatomic_dec(&preempt_count)
8
9static inline int in_interrupt(void)
10{
11 return 0;
12}
13
14#endif /* __LINUX_PREEMPT_H */
diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/radix-tree/linux/radix-tree.h
index ce694ddd4aea..bf1bb231f9b5 100644
--- a/tools/testing/radix-tree/linux/radix-tree.h
+++ b/tools/testing/radix-tree/linux/radix-tree.h
@@ -1 +1,26 @@
1#ifndef _TEST_RADIX_TREE_H
2#define _TEST_RADIX_TREE_H
3
4#include "generated/map-shift.h"
1#include "../../../../include/linux/radix-tree.h" 5#include "../../../../include/linux/radix-tree.h"
6
7extern int kmalloc_verbose;
8extern int test_verbose;
9
10static inline void trace_call_rcu(struct rcu_head *head,
11 void (*func)(struct rcu_head *head))
12{
13 if (kmalloc_verbose)
14 printf("Delaying free of %p to slab\n", (char *)head -
15 offsetof(struct radix_tree_node, rcu_head));
16 call_rcu(head, func);
17}
18
19#define printv(verbosity_level, fmt, ...) \
20 if(test_verbose >= verbosity_level) \
21 printf(fmt, ##__VA_ARGS__)
22
23#undef call_rcu
24#define call_rcu(x, y) trace_call_rcu(x, y)
25
26#endif /* _TEST_RADIX_TREE_H */
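
The trace_call_rcu() wrapper above recovers the radix_tree_node from the rcu_head embedded in it by subtracting offsetof(struct radix_tree_node, rcu_head) from the head pointer, the same arithmetic the kernel's container_of() performs. A minimal standalone illustration with simplified stand-in types (these are not the kernel structures):

#include <stddef.h>
#include <stdio.h>

struct rcu_head { void *next; };                      /* simplified stand-in */
struct node { long payload; struct rcu_head rcu_head; };

/* Recover the enclosing structure from a pointer to one of its members;
 * trace_call_rcu() above does the same subtraction by hand. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
        struct node n;
        struct rcu_head *head = &n.rcu_head;

        printf("recovered the right node: %d\n",
               container_of(head, struct node, rcu_head) == &n);
        return 0;
}
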
diff --git a/tools/testing/radix-tree/linux/types.h b/tools/testing/radix-tree/linux/types.h
deleted file mode 100644
index 8491d89873bb..000000000000
--- a/tools/testing/radix-tree/linux/types.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef _TYPES_H
2#define _TYPES_H
3
4#include "../../include/linux/types.h"
5
6#define __rcu
7#define __read_mostly
8
9static inline void INIT_LIST_HEAD(struct list_head *list)
10{
11 list->next = list;
12 list->prev = list;
13}
14
15typedef struct {
16 unsigned int x;
17} spinlock_t;
18
19#define uninitialized_var(x) x = x
20
21#include <linux/gfp.h>
22
23#endif
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index f7e9801a6754..b829127d5670 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -3,6 +3,7 @@
3#include <unistd.h> 3#include <unistd.h>
4#include <time.h> 4#include <time.h>
5#include <assert.h> 5#include <assert.h>
6#include <limits.h>
6 7
7#include <linux/slab.h> 8#include <linux/slab.h>
8#include <linux/radix-tree.h> 9#include <linux/radix-tree.h>
@@ -67,7 +68,7 @@ void big_gang_check(bool long_run)
67 68
68 for (i = 0; i < (long_run ? 1000 : 3); i++) { 69 for (i = 0; i < (long_run ? 1000 : 3); i++) {
69 __big_gang_check(); 70 __big_gang_check();
70 printf("%d ", i); 71 printv(2, "%d ", i);
71 fflush(stdout); 72 fflush(stdout);
72 } 73 }
73} 74}
@@ -128,14 +129,19 @@ void check_copied_tags(struct radix_tree_root *tree, unsigned long start, unsign
128 putchar('.'); */ 129 putchar('.'); */
129 if (idx[i] < start || idx[i] > end) { 130 if (idx[i] < start || idx[i] > end) {
130 if (item_tag_get(tree, idx[i], totag)) { 131 if (item_tag_get(tree, idx[i], totag)) {
131 printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag)); 132 printv(2, "%lu-%lu: %lu, tags %d-%d\n", start,
133 end, idx[i], item_tag_get(tree, idx[i],
134 fromtag),
135 item_tag_get(tree, idx[i], totag));
132 } 136 }
133 assert(!item_tag_get(tree, idx[i], totag)); 137 assert(!item_tag_get(tree, idx[i], totag));
134 continue; 138 continue;
135 } 139 }
136 if (item_tag_get(tree, idx[i], fromtag) ^ 140 if (item_tag_get(tree, idx[i], fromtag) ^
137 item_tag_get(tree, idx[i], totag)) { 141 item_tag_get(tree, idx[i], totag)) {
138 printf("%lu-%lu: %lu, tags %d-%d\n", start, end, idx[i], item_tag_get(tree, idx[i], fromtag), item_tag_get(tree, idx[i], totag)); 142 printv(2, "%lu-%lu: %lu, tags %d-%d\n", start, end,
143 idx[i], item_tag_get(tree, idx[i], fromtag),
144 item_tag_get(tree, idx[i], totag));
139 } 145 }
140 assert(!(item_tag_get(tree, idx[i], fromtag) ^ 146 assert(!(item_tag_get(tree, idx[i], fromtag) ^
141 item_tag_get(tree, idx[i], totag))); 147 item_tag_get(tree, idx[i], totag)));
@@ -237,7 +243,7 @@ static void __locate_check(struct radix_tree_root *tree, unsigned long index,
237 item = item_lookup(tree, index); 243 item = item_lookup(tree, index);
238 index2 = find_item(tree, item); 244 index2 = find_item(tree, item);
239 if (index != index2) { 245 if (index != index2) {
240 printf("index %ld order %d inserted; found %ld\n", 246 printv(2, "index %ld order %d inserted; found %ld\n",
241 index, order, index2); 247 index, order, index2);
242 abort(); 248 abort();
243 } 249 }
@@ -288,43 +294,48 @@ static void single_thread_tests(bool long_run)
288{ 294{
289 int i; 295 int i;
290 296
291 printf("starting single_thread_tests: %d allocated, preempt %d\n", 297 printv(1, "starting single_thread_tests: %d allocated, preempt %d\n",
292 nr_allocated, preempt_count); 298 nr_allocated, preempt_count);
293 multiorder_checks(); 299 multiorder_checks();
294 rcu_barrier(); 300 rcu_barrier();
295 printf("after multiorder_check: %d allocated, preempt %d\n", 301 printv(2, "after multiorder_check: %d allocated, preempt %d\n",
296 nr_allocated, preempt_count); 302 nr_allocated, preempt_count);
297 locate_check(); 303 locate_check();
298 rcu_barrier(); 304 rcu_barrier();
299 printf("after locate_check: %d allocated, preempt %d\n", 305 printv(2, "after locate_check: %d allocated, preempt %d\n",
300 nr_allocated, preempt_count); 306 nr_allocated, preempt_count);
301 tag_check(); 307 tag_check();
302 rcu_barrier(); 308 rcu_barrier();
303 printf("after tag_check: %d allocated, preempt %d\n", 309 printv(2, "after tag_check: %d allocated, preempt %d\n",
304 nr_allocated, preempt_count); 310 nr_allocated, preempt_count);
305 gang_check(); 311 gang_check();
306 rcu_barrier(); 312 rcu_barrier();
307 printf("after gang_check: %d allocated, preempt %d\n", 313 printv(2, "after gang_check: %d allocated, preempt %d\n",
308 nr_allocated, preempt_count); 314 nr_allocated, preempt_count);
309 add_and_check(); 315 add_and_check();
310 rcu_barrier(); 316 rcu_barrier();
311 printf("after add_and_check: %d allocated, preempt %d\n", 317 printv(2, "after add_and_check: %d allocated, preempt %d\n",
312 nr_allocated, preempt_count); 318 nr_allocated, preempt_count);
313 dynamic_height_check(); 319 dynamic_height_check();
314 rcu_barrier(); 320 rcu_barrier();
315 printf("after dynamic_height_check: %d allocated, preempt %d\n", 321 printv(2, "after dynamic_height_check: %d allocated, preempt %d\n",
322 nr_allocated, preempt_count);
323 idr_checks();
324 ida_checks();
325 rcu_barrier();
326 printv(2, "after idr_checks: %d allocated, preempt %d\n",
316 nr_allocated, preempt_count); 327 nr_allocated, preempt_count);
317 big_gang_check(long_run); 328 big_gang_check(long_run);
318 rcu_barrier(); 329 rcu_barrier();
319 printf("after big_gang_check: %d allocated, preempt %d\n", 330 printv(2, "after big_gang_check: %d allocated, preempt %d\n",
320 nr_allocated, preempt_count); 331 nr_allocated, preempt_count);
321 for (i = 0; i < (long_run ? 2000 : 3); i++) { 332 for (i = 0; i < (long_run ? 2000 : 3); i++) {
322 copy_tag_check(); 333 copy_tag_check();
323 printf("%d ", i); 334 printv(2, "%d ", i);
324 fflush(stdout); 335 fflush(stdout);
325 } 336 }
326 rcu_barrier(); 337 rcu_barrier();
327 printf("after copy_tag_check: %d allocated, preempt %d\n", 338 printv(2, "after copy_tag_check: %d allocated, preempt %d\n",
328 nr_allocated, preempt_count); 339 nr_allocated, preempt_count);
329} 340}
330 341
@@ -334,24 +345,28 @@ int main(int argc, char **argv)
334 int opt; 345 int opt;
335 unsigned int seed = time(NULL); 346 unsigned int seed = time(NULL);
336 347
337 while ((opt = getopt(argc, argv, "ls:")) != -1) { 348 while ((opt = getopt(argc, argv, "ls:v")) != -1) {
338 if (opt == 'l') 349 if (opt == 'l')
339 long_run = true; 350 long_run = true;
340 else if (opt == 's') 351 else if (opt == 's')
341 seed = strtoul(optarg, NULL, 0); 352 seed = strtoul(optarg, NULL, 0);
353 else if (opt == 'v')
354 test_verbose++;
342 } 355 }
343 356
344 printf("random seed %u\n", seed); 357 printf("random seed %u\n", seed);
345 srand(seed); 358 srand(seed);
346 359
360 printf("running tests\n");
361
347 rcu_register_thread(); 362 rcu_register_thread();
348 radix_tree_init(); 363 radix_tree_init();
349 364
350 regression1_test(); 365 regression1_test();
351 regression2_test(); 366 regression2_test();
352 regression3_test(); 367 regression3_test();
353 iteration_test(0, 10); 368 iteration_test(0, 10 + 90 * long_run);
354 iteration_test(7, 20); 369 iteration_test(7, 10 + 90 * long_run);
355 single_thread_tests(long_run); 370 single_thread_tests(long_run);
356 371
357 /* Free any remaining preallocated nodes */ 372 /* Free any remaining preallocated nodes */
@@ -360,9 +375,11 @@ int main(int argc, char **argv)
360 benchmark(); 375 benchmark();
361 376
362 rcu_barrier(); 377 rcu_barrier();
363 printf("after rcu_barrier: %d allocated, preempt %d\n", 378 printv(2, "after rcu_barrier: %d allocated, preempt %d\n",
364 nr_allocated, preempt_count); 379 nr_allocated, preempt_count);
365 rcu_unregister_thread(); 380 rcu_unregister_thread();
366 381
382 printf("tests completed\n");
383
367 exit(0); 384 exit(0);
368} 385}
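
The printv() macro added to linux/radix-tree.h above and the -v handling added to main() here give the suite a simple verbosity dial: every -v increments test_verbose, and printv(level, ...) only prints once test_verbose reaches that level, so plain runs stay quiet while -v shows the per-test banners and -v -v adds the per-iteration output. A self-contained sketch of the same switch (hypothetical messages, not part of the patch):

#include <stdio.h>

int test_verbose;       /* the suite bumps this once per -v on the command line */

#define printv(verbosity_level, fmt, ...) \
        if (test_verbose >= verbosity_level) \
                printf(fmt, ##__VA_ARGS__)

int main(void)
{
        test_verbose = 1;                       /* as if run with a single -v */
        printv(1, "per-test banner\n");         /* printed */
        printv(2, "per-iteration chatter\n");   /* suppressed; would need -v -v */
        return 0;
}
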
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index f79812a5e070..06c71178d07d 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -30,7 +30,7 @@ static void __multiorder_tag_test(int index, int order)
30 /* our canonical entry */ 30 /* our canonical entry */
31 base = index & ~((1 << order) - 1); 31 base = index & ~((1 << order) - 1);
32 32
33 printf("Multiorder tag test with index %d, canonical entry %d\n", 33 printv(2, "Multiorder tag test with index %d, canonical entry %d\n",
34 index, base); 34 index, base);
35 35
36 err = item_insert_order(&tree, index, order); 36 err = item_insert_order(&tree, index, order);
@@ -150,7 +150,7 @@ static void multiorder_check(unsigned long index, int order)
150 struct item *item2 = item_create(min, order); 150 struct item *item2 = item_create(min, order);
151 RADIX_TREE(tree, GFP_KERNEL); 151 RADIX_TREE(tree, GFP_KERNEL);
152 152
153 printf("Multiorder index %ld, order %d\n", index, order); 153 printv(2, "Multiorder index %ld, order %d\n", index, order);
154 154
155 assert(item_insert_order(&tree, index, order) == 0); 155 assert(item_insert_order(&tree, index, order) == 0);
156 156
@@ -188,7 +188,7 @@ static void multiorder_shrink(unsigned long index, int order)
188 RADIX_TREE(tree, GFP_KERNEL); 188 RADIX_TREE(tree, GFP_KERNEL);
189 struct radix_tree_node *node; 189 struct radix_tree_node *node;
190 190
191 printf("Multiorder shrink index %ld, order %d\n", index, order); 191 printv(2, "Multiorder shrink index %ld, order %d\n", index, order);
192 192
193 assert(item_insert_order(&tree, 0, order) == 0); 193 assert(item_insert_order(&tree, 0, order) == 0);
194 194
@@ -209,7 +209,8 @@ static void multiorder_shrink(unsigned long index, int order)
209 item_check_absent(&tree, i); 209 item_check_absent(&tree, i);
210 210
211 if (!item_delete(&tree, 0)) { 211 if (!item_delete(&tree, 0)) {
212 printf("failed to delete index %ld (order %d)\n", index, order); abort(); 212 printv(2, "failed to delete index %ld (order %d)\n", index, order);
213 abort();
213 } 214 }
214 215
215 for (i = 0; i < 2*max; i++) 216 for (i = 0; i < 2*max; i++)
@@ -234,7 +235,7 @@ void multiorder_iteration(void)
234 void **slot; 235 void **slot;
235 int i, j, err; 236 int i, j, err;
236 237
237 printf("Multiorder iteration test\n"); 238 printv(1, "Multiorder iteration test\n");
238 239
239#define NUM_ENTRIES 11 240#define NUM_ENTRIES 11
240 int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128}; 241 int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
@@ -275,7 +276,7 @@ void multiorder_tagged_iteration(void)
275 void **slot; 276 void **slot;
276 int i, j; 277 int i, j;
277 278
278 printf("Multiorder tagged iteration test\n"); 279 printv(1, "Multiorder tagged iteration test\n");
279 280
280#define MT_NUM_ENTRIES 9 281#define MT_NUM_ENTRIES 9
281 int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128}; 282 int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
@@ -355,6 +356,10 @@ void multiorder_tagged_iteration(void)
355 item_kill_tree(&tree); 356 item_kill_tree(&tree);
356} 357}
357 358
359/*
360 * Basic join checks: make sure we can't find an entry in the tree after
361 * a larger entry has replaced it
362 */
358static void multiorder_join1(unsigned long index, 363static void multiorder_join1(unsigned long index,
359 unsigned order1, unsigned order2) 364 unsigned order1, unsigned order2)
360{ 365{
@@ -373,6 +378,10 @@ static void multiorder_join1(unsigned long index,
373 item_kill_tree(&tree); 378 item_kill_tree(&tree);
374} 379}
375 380
381/*
382 * Check that the accounting of exceptional entries is handled correctly
383 * by joining an exceptional entry to a normal pointer.
384 */
376static void multiorder_join2(unsigned order1, unsigned order2) 385static void multiorder_join2(unsigned order1, unsigned order2)
377{ 386{
378 RADIX_TREE(tree, GFP_KERNEL); 387 RADIX_TREE(tree, GFP_KERNEL);
@@ -386,6 +395,9 @@ static void multiorder_join2(unsigned order1, unsigned order2)
386 assert(item2 == (void *)0x12UL); 395 assert(item2 == (void *)0x12UL);
387 assert(node->exceptional == 1); 396 assert(node->exceptional == 1);
388 397
398 item2 = radix_tree_lookup(&tree, 0);
399 free(item2);
400
389 radix_tree_join(&tree, 0, order1, item1); 401 radix_tree_join(&tree, 0, order1, item1);
390 item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL); 402 item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
391 assert(item2 == item1); 403 assert(item2 == item1);
@@ -453,7 +465,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
453{ 465{
454 struct radix_tree_preload *rtp = &radix_tree_preloads; 466 struct radix_tree_preload *rtp = &radix_tree_preloads;
455 if (rtp->nr != 0) 467 if (rtp->nr != 0)
456 printf("split(%u %u) remaining %u\n", old_order, new_order, 468 printv(2, "split(%u %u) remaining %u\n", old_order, new_order,
457 rtp->nr); 469 rtp->nr);
458 /* 470 /*
459 * Can't check for equality here as some nodes may have been 471 * Can't check for equality here as some nodes may have been
@@ -461,7 +473,7 @@ static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
461 * nodes allocated since they should have all been preloaded. 473 * nodes allocated since they should have all been preloaded.
462 */ 474 */
463 if (nr_allocated > alloc) 475 if (nr_allocated > alloc)
464 printf("split(%u %u) allocated %u %u\n", old_order, new_order, 476 printv(2, "split(%u %u) allocated %u %u\n", old_order, new_order,
465 alloc, nr_allocated); 477 alloc, nr_allocated);
466} 478}
467 479
@@ -471,6 +483,7 @@ static void __multiorder_split(int old_order, int new_order)
471 void **slot; 483 void **slot;
472 struct radix_tree_iter iter; 484 struct radix_tree_iter iter;
473 unsigned alloc; 485 unsigned alloc;
486 struct item *item;
474 487
475 radix_tree_preload(GFP_KERNEL); 488 radix_tree_preload(GFP_KERNEL);
476 assert(item_insert_order(&tree, 0, old_order) == 0); 489 assert(item_insert_order(&tree, 0, old_order) == 0);
@@ -479,7 +492,7 @@ static void __multiorder_split(int old_order, int new_order)
479 /* Wipe out the preloaded cache or it'll confuse check_mem() */ 492 /* Wipe out the preloaded cache or it'll confuse check_mem() */
480 radix_tree_cpu_dead(0); 493 radix_tree_cpu_dead(0);
481 494
482 radix_tree_tag_set(&tree, 0, 2); 495 item = radix_tree_tag_set(&tree, 0, 2);
483 496
484 radix_tree_split_preload(old_order, new_order, GFP_KERNEL); 497 radix_tree_split_preload(old_order, new_order, GFP_KERNEL);
485 alloc = nr_allocated; 498 alloc = nr_allocated;
@@ -492,6 +505,7 @@ static void __multiorder_split(int old_order, int new_order)
492 radix_tree_preload_end(); 505 radix_tree_preload_end();
493 506
494 item_kill_tree(&tree); 507 item_kill_tree(&tree);
508 free(item);
495} 509}
496 510
497static void __multiorder_split2(int old_order, int new_order) 511static void __multiorder_split2(int old_order, int new_order)
@@ -633,3 +647,10 @@ void multiorder_checks(void)
633 647
634 radix_tree_cpu_dead(0); 648 radix_tree_cpu_dead(0);
635} 649}
650
651int __weak main(void)
652{
653 radix_tree_init();
654 multiorder_checks();
655 return 0;
656}
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index 0d6813a61b37..bf97742fc18c 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -193,7 +193,7 @@ void regression1_test(void)
193 long arg; 193 long arg;
194 194
195 /* Regression #1 */ 195 /* Regression #1 */
196 printf("running regression test 1, should finish in under a minute\n"); 196 printv(1, "running regression test 1, should finish in under a minute\n");
197 nr_threads = 2; 197 nr_threads = 2;
198 pthread_barrier_init(&worker_barrier, NULL, nr_threads); 198 pthread_barrier_init(&worker_barrier, NULL, nr_threads);
199 199
@@ -216,5 +216,5 @@ void regression1_test(void)
216 216
217 free(threads); 217 free(threads);
218 218
219 printf("regression test 1, done\n"); 219 printv(1, "regression test 1, done\n");
220} 220}
diff --git a/tools/testing/radix-tree/regression2.c b/tools/testing/radix-tree/regression2.c
index a41325d7a170..42dd2a33ed24 100644
--- a/tools/testing/radix-tree/regression2.c
+++ b/tools/testing/radix-tree/regression2.c
@@ -80,7 +80,7 @@ void regression2_test(void)
80 unsigned long int start, end; 80 unsigned long int start, end;
81 struct page *pages[1]; 81 struct page *pages[1];
82 82
83 printf("running regression test 2 (should take milliseconds)\n"); 83 printv(1, "running regression test 2 (should take milliseconds)\n");
84 /* 0. */ 84 /* 0. */
85 for (i = 0; i <= max_slots - 1; i++) { 85 for (i = 0; i <= max_slots - 1; i++) {
86 p = page_alloc(); 86 p = page_alloc();
@@ -103,7 +103,7 @@ void regression2_test(void)
103 103
104 /* 4. */ 104 /* 4. */
105 for (i = max_slots - 1; i >= 0; i--) 105 for (i = max_slots - 1; i >= 0; i--)
106 radix_tree_delete(&mt_tree, i); 106 free(radix_tree_delete(&mt_tree, i));
107 107
108 /* 5. */ 108 /* 5. */
109 // NOTE: start should not be 0 because radix_tree_gang_lookup_tag_slot 109 // NOTE: start should not be 0 because radix_tree_gang_lookup_tag_slot
@@ -114,7 +114,9 @@ void regression2_test(void)
114 PAGECACHE_TAG_TOWRITE); 114 PAGECACHE_TAG_TOWRITE);
115 115
116 /* We remove all the remained nodes */ 116 /* We remove all the remained nodes */
117 radix_tree_delete(&mt_tree, max_slots); 117 free(radix_tree_delete(&mt_tree, max_slots));
118 118
119 printf("regression test 2, done\n"); 119 BUG_ON(!radix_tree_empty(&mt_tree));
120
121 printv(1, "regression test 2, done\n");
120} 122}
diff --git a/tools/testing/radix-tree/regression3.c b/tools/testing/radix-tree/regression3.c
index b594841fae85..670c3d2ae7b1 100644
--- a/tools/testing/radix-tree/regression3.c
+++ b/tools/testing/radix-tree/regression3.c
@@ -34,21 +34,21 @@ void regression3_test(void)
34 void **slot; 34 void **slot;
35 bool first; 35 bool first;
36 36
37 printf("running regression test 3 (should take milliseconds)\n"); 37 printv(1, "running regression test 3 (should take milliseconds)\n");
38 38
39 radix_tree_insert(&root, 0, ptr0); 39 radix_tree_insert(&root, 0, ptr0);
40 radix_tree_tag_set(&root, 0, 0); 40 radix_tree_tag_set(&root, 0, 0);
41 41
42 first = true; 42 first = true;
43 radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) { 43 radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
44 printf("tagged %ld %p\n", iter.index, *slot); 44 printv(2, "tagged %ld %p\n", iter.index, *slot);
45 if (first) { 45 if (first) {
46 radix_tree_insert(&root, 1, ptr); 46 radix_tree_insert(&root, 1, ptr);
47 radix_tree_tag_set(&root, 1, 0); 47 radix_tree_tag_set(&root, 1, 0);
48 first = false; 48 first = false;
49 } 49 }
50 if (radix_tree_deref_retry(*slot)) { 50 if (radix_tree_deref_retry(*slot)) {
51 printf("retry at %ld\n", iter.index); 51 printv(2, "retry at %ld\n", iter.index);
52 slot = radix_tree_iter_retry(&iter); 52 slot = radix_tree_iter_retry(&iter);
53 continue; 53 continue;
54 } 54 }
@@ -57,13 +57,13 @@ void regression3_test(void)
57 57
58 first = true; 58 first = true;
59 radix_tree_for_each_slot(slot, &root, &iter, 0) { 59 radix_tree_for_each_slot(slot, &root, &iter, 0) {
60 printf("slot %ld %p\n", iter.index, *slot); 60 printv(2, "slot %ld %p\n", iter.index, *slot);
61 if (first) { 61 if (first) {
62 radix_tree_insert(&root, 1, ptr); 62 radix_tree_insert(&root, 1, ptr);
63 first = false; 63 first = false;
64 } 64 }
65 if (radix_tree_deref_retry(*slot)) { 65 if (radix_tree_deref_retry(*slot)) {
66 printk("retry at %ld\n", iter.index); 66 printv(2, "retry at %ld\n", iter.index);
67 slot = radix_tree_iter_retry(&iter); 67 slot = radix_tree_iter_retry(&iter);
68 continue; 68 continue;
69 } 69 }
@@ -72,30 +72,30 @@ void regression3_test(void)
72 72
73 first = true; 73 first = true;
74 radix_tree_for_each_contig(slot, &root, &iter, 0) { 74 radix_tree_for_each_contig(slot, &root, &iter, 0) {
75 printk("contig %ld %p\n", iter.index, *slot); 75 printv(2, "contig %ld %p\n", iter.index, *slot);
76 if (first) { 76 if (first) {
77 radix_tree_insert(&root, 1, ptr); 77 radix_tree_insert(&root, 1, ptr);
78 first = false; 78 first = false;
79 } 79 }
80 if (radix_tree_deref_retry(*slot)) { 80 if (radix_tree_deref_retry(*slot)) {
81 printk("retry at %ld\n", iter.index); 81 printv(2, "retry at %ld\n", iter.index);
82 slot = radix_tree_iter_retry(&iter); 82 slot = radix_tree_iter_retry(&iter);
83 continue; 83 continue;
84 } 84 }
85 } 85 }
86 86
87 radix_tree_for_each_slot(slot, &root, &iter, 0) { 87 radix_tree_for_each_slot(slot, &root, &iter, 0) {
88 printf("slot %ld %p\n", iter.index, *slot); 88 printv(2, "slot %ld %p\n", iter.index, *slot);
89 if (!iter.index) { 89 if (!iter.index) {
90 printf("next at %ld\n", iter.index); 90 printv(2, "next at %ld\n", iter.index);
91 slot = radix_tree_iter_resume(slot, &iter); 91 slot = radix_tree_iter_resume(slot, &iter);
92 } 92 }
93 } 93 }
94 94
95 radix_tree_for_each_contig(slot, &root, &iter, 0) { 95 radix_tree_for_each_contig(slot, &root, &iter, 0) {
96 printf("contig %ld %p\n", iter.index, *slot); 96 printv(2, "contig %ld %p\n", iter.index, *slot);
97 if (!iter.index) { 97 if (!iter.index) {
98 printf("next at %ld\n", iter.index); 98 printv(2, "next at %ld\n", iter.index);
99 slot = radix_tree_iter_resume(slot, &iter); 99 slot = radix_tree_iter_resume(slot, &iter);
100 } 100 }
101 } 101 }
@@ -103,9 +103,9 @@ void regression3_test(void)
103 radix_tree_tag_set(&root, 0, 0); 103 radix_tree_tag_set(&root, 0, 0);
104 radix_tree_tag_set(&root, 1, 0); 104 radix_tree_tag_set(&root, 1, 0);
105 radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) { 105 radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
106 printf("tagged %ld %p\n", iter.index, *slot); 106 printv(2, "tagged %ld %p\n", iter.index, *slot);
107 if (!iter.index) { 107 if (!iter.index) {
108 printf("next at %ld\n", iter.index); 108 printv(2, "next at %ld\n", iter.index);
109 slot = radix_tree_iter_resume(slot, &iter); 109 slot = radix_tree_iter_resume(slot, &iter);
110 } 110 }
111 } 111 }
@@ -113,5 +113,5 @@ void regression3_test(void)
113 radix_tree_delete(&root, 0); 113 radix_tree_delete(&root, 0);
114 radix_tree_delete(&root, 1); 114 radix_tree_delete(&root, 1);
115 115
116 printf("regression test 3 passed\n"); 116 printv(1, "regression test 3 passed\n");
117} 117}
diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c
index fd98c132207a..d4ff00989245 100644
--- a/tools/testing/radix-tree/tag_check.c
+++ b/tools/testing/radix-tree/tag_check.c
@@ -49,10 +49,10 @@ void simple_checks(void)
49 } 49 }
50 verify_tag_consistency(&tree, 0); 50 verify_tag_consistency(&tree, 0);
51 verify_tag_consistency(&tree, 1); 51 verify_tag_consistency(&tree, 1);
52 printf("before item_kill_tree: %d allocated\n", nr_allocated); 52 printv(2, "before item_kill_tree: %d allocated\n", nr_allocated);
53 item_kill_tree(&tree); 53 item_kill_tree(&tree);
54 rcu_barrier(); 54 rcu_barrier();
55 printf("after item_kill_tree: %d allocated\n", nr_allocated); 55 printv(2, "after item_kill_tree: %d allocated\n", nr_allocated);
56} 56}
57 57
58/* 58/*
@@ -257,7 +257,7 @@ static void do_thrash(struct radix_tree_root *tree, char *thrash_state, int tag)
257 257
258 gang_check(tree, thrash_state, tag); 258 gang_check(tree, thrash_state, tag);
259 259
260 printf("%d(%d) %d(%d) %d(%d) %d(%d) / " 260 printv(2, "%d(%d) %d(%d) %d(%d) %d(%d) / "
261 "%d(%d) present, %d(%d) tagged\n", 261 "%d(%d) present, %d(%d) tagged\n",
262 insert_chunk, nr_inserted, 262 insert_chunk, nr_inserted,
263 delete_chunk, nr_deleted, 263 delete_chunk, nr_deleted,
@@ -296,13 +296,13 @@ static void __leak_check(void)
296{ 296{
297 RADIX_TREE(tree, GFP_KERNEL); 297 RADIX_TREE(tree, GFP_KERNEL);
298 298
299 printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); 299 printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
300 item_insert(&tree, 1000000); 300 item_insert(&tree, 1000000);
301 printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); 301 printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
302 item_delete(&tree, 1000000); 302 item_delete(&tree, 1000000);
303 printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); 303 printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
304 item_kill_tree(&tree); 304 item_kill_tree(&tree);
305 printf("%d: nr_allocated=%d\n", __LINE__, nr_allocated); 305 printv(2, "%d: nr_allocated=%d\n", __LINE__, nr_allocated);
306} 306}
307 307
308static void single_check(void) 308static void single_check(void)
@@ -336,15 +336,15 @@ void tag_check(void)
336 extend_checks(); 336 extend_checks();
337 contract_checks(); 337 contract_checks();
338 rcu_barrier(); 338 rcu_barrier();
339 printf("after extend_checks: %d allocated\n", nr_allocated); 339 printv(2, "after extend_checks: %d allocated\n", nr_allocated);
340 __leak_check(); 340 __leak_check();
341 leak_check(); 341 leak_check();
342 rcu_barrier(); 342 rcu_barrier();
343 printf("after leak_check: %d allocated\n", nr_allocated); 343 printv(2, "after leak_check: %d allocated\n", nr_allocated);
344 simple_checks(); 344 simple_checks();
345 rcu_barrier(); 345 rcu_barrier();
346 printf("after simple_checks: %d allocated\n", nr_allocated); 346 printv(2, "after simple_checks: %d allocated\n", nr_allocated);
347 thrash_tags(); 347 thrash_tags();
348 rcu_barrier(); 348 rcu_barrier();
349 printf("after thrash_tags: %d allocated\n", nr_allocated); 349 printv(2, "after thrash_tags: %d allocated\n", nr_allocated);
350} 350}
diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c
index e5726e373646..1a257d738a1e 100644
--- a/tools/testing/radix-tree/test.c
+++ b/tools/testing/radix-tree/test.c
@@ -29,15 +29,28 @@ int __item_insert(struct radix_tree_root *root, struct item *item)
29 return __radix_tree_insert(root, item->index, item->order, item); 29 return __radix_tree_insert(root, item->index, item->order, item);
30} 30}
31 31
32int item_insert(struct radix_tree_root *root, unsigned long index) 32struct item *item_create(unsigned long index, unsigned int order)
33{ 33{
34 return __item_insert(root, item_create(index, 0)); 34 struct item *ret = malloc(sizeof(*ret));
35
36 ret->index = index;
37 ret->order = order;
38 return ret;
35} 39}
36 40
37int item_insert_order(struct radix_tree_root *root, unsigned long index, 41int item_insert_order(struct radix_tree_root *root, unsigned long index,
38 unsigned order) 42 unsigned order)
39{ 43{
40 return __item_insert(root, item_create(index, order)); 44 struct item *item = item_create(index, order);
45 int err = __item_insert(root, item);
46 if (err)
47 free(item);
48 return err;
49}
50
51int item_insert(struct radix_tree_root *root, unsigned long index)
52{
53 return item_insert_order(root, index, 0);
41} 54}
42 55
43void item_sanity(struct item *item, unsigned long index) 56void item_sanity(struct item *item, unsigned long index)
@@ -61,15 +74,6 @@ int item_delete(struct radix_tree_root *root, unsigned long index)
61 return 0; 74 return 0;
62} 75}
63 76
64struct item *item_create(unsigned long index, unsigned int order)
65{
66 struct item *ret = malloc(sizeof(*ret));
67
68 ret->index = index;
69 ret->order = order;
70 return ret;
71}
72
73void item_check_present(struct radix_tree_root *root, unsigned long index) 77void item_check_present(struct radix_tree_root *root, unsigned long index)
74{ 78{
75 struct item *item; 79 struct item *item;
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index 056a23b56467..b30e11d9d271 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -34,6 +34,8 @@ void tag_check(void);
34void multiorder_checks(void); 34void multiorder_checks(void);
35void iteration_test(unsigned order, unsigned duration); 35void iteration_test(unsigned order, unsigned duration);
36void benchmark(void); 36void benchmark(void);
37void idr_checks(void);
38void ida_checks(void);
37 39
38struct item * 40struct item *
39item_tag_set(struct radix_tree_root *root, unsigned long index, int tag); 41item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);