Diffstat (limited to 'lib/idr.c')
-rw-r--r--  lib/idr.c | 408
1 files changed, 408 insertions, 0 deletions
diff --git a/lib/idr.c b/lib/idr.c
new file mode 100644
index 000000000000..81fc430602ee
--- /dev/null
+++ b/lib/idr.c
@@ -0,0 +1,408 @@
/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *).  You can pass
 * this id to a user for him to pass back at a later time.  You then
 * pass that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers in a local pool)
 * so we don't need to go to the memory "store" during an id allocate
 * and you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
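
A minimal usage sketch of the API this file exports (illustration only,
not part of the patch; my_idr, my_lock and thing_register are
hypothetical caller-side names, and the locking follows the rules given
in the kernel-doc comments below):

	static struct idr my_idr;	/* set up once with idr_init(&my_idr) */
	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;	/* serializes allocate/find/remove */

	int thing_register(void *thing)
	{
		int id, err;

		do {
			if (!idr_pre_get(&my_idr, GFP_KERNEL))
				return -ENOMEM;	/* really out of memory */
			spin_lock(&my_lock);
			err = idr_get_new(&my_idr, thing, &id);
			spin_unlock(&my_lock);
		} while (err == -EAGAIN);	/* preallocation consumed; retry */

		return err ? err : id;	/* 0 ... 0x7fffffff on success */
	}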

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/string.h>
#include <linux/idr.h>

static kmem_cache_t *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;

	spin_lock(&idp->lock);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock(&idp->lock);
	return(p);
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock(&idp->lock);
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
	spin_unlock(&idp->lock);
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, unsigned gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

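/*
 * sub_alloc() return convention (translated to proper errno values by
 * the callers below): the new id on success, -1 when a layer
 * allocation fails (caller should retry after idr_pre_get()), -2 when
 * the search must restart from the top of the tree, -3 when the id
 * space at or above *starting_id is exhausted.
 */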
static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	struct idr_layer *pa[MAX_LEVEL];
	int l, id;
	long bm;

	id = *starting_id;
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while loop until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* No space available; go back to the previous layer. */
			l++;
			id = (id | ((1 << (IDR_BITS*l))-1)) + 1;
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}
			continue;
		}
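		/*
		 * A free slot m was found past the requested digit n at
		 * this level: substitute m for n in this level's digit of
		 * the id and clear all lower bits, so the descent resumes
		 * at the first id covered by slot m.
		 */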
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}
	/*
	 * We have reached the leaf node, plant the
	 * user's pointer and return the raw id.
	 */
	p->ary[m] = (struct idr_layer *)ptr;
	__set_bit(m, &p->bitmap);
	p->count++;
	/*
	 * If this layer is full mark the bit in the layer above
	 * to show that this part of the radix tree is full.
	 * This may complete the layer above and require walking
	 * up the radix tree.
	 */
	n = id;
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		n = n >> IDR_BITS;
		__set_bit((n & IDR_MASK), &p->bitmap);
	}
	return(id);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *p, *new;
	int layers, v, id;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
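	/*
	 * For example, with IDR_BITS == 5 (the usual 32-bit value) one
	 * layer covers ids 0..31, two layers cover 0..1023, and each
	 * extra layer multiplies the covered range by IDR_SIZE.
	 */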
	while ((layers < MAX_LEVEL) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				free_layer(idp, new);
			}
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, ptr, &id);
	if (v == -2)
		goto build_up;
	return(v);
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
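
The documented retry pattern with a floor id, as a sketch (illustration
only; my_idr and my_lock are the hypothetical handles from the sketch
above, and 100 is an arbitrary lower bound):

	int thing_register_above(void *thing)
	{
		int id, err;

		do {
			if (!idr_pre_get(&my_idr, GFP_KERNEL))
				return -ENOMEM;
			spin_lock(&my_lock);
			err = idr_get_new_above(&my_idr, thing, 100, &id);
			spin_unlock(&my_lock);
		} while (err == -EAGAIN);	/* unlock, re-preallocate, retry */

		return err ? err : id;		/* id >= 100 on success */
	}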

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
	       "idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

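/*
 * sub_remove() walks from the top layer down to the leaf, recording the
 * address of each traversed slot in pa[] so that, once the entry is
 * cleared, any layers left empty can be freed bottom-up through the
 * saved pointers.
 */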
static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else {
		idr_remove_warning(id);
	}
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 &&
	    (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/* We can drop a layer. */
		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
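	/*
	 * Trim the local free-layer pool: the unconditional return means
	 * this "while" frees at most one cached layer per idr_remove()
	 * call once id_free_cnt has reached IDR_FREE_MAX.
	 */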
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
		return;
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer registered with the given @id.  A %NULL return
 * indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
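
A lookup sketch using the hypothetical names from the examples above;
per the kernel-doc, the caller serializes idr_find() against
idr_get_new() and idr_remove():

	void *thing_lookup(int id)
	{
		void *p;

		spin_lock(&my_lock);
		p = idr_find(&my_idr, id);
		spin_unlock(&my_lock);
		return p;	/* NULL if id is not allocated */
	}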

static void idr_cache_ctor(void *idr_layer, kmem_cache_t *idr_layer_cache,
		unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
	return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);