Diffstat (limited to 'lib/idr.c')
-rw-r--r--	lib/idr.c	142
1 file changed, 83 insertions(+), 59 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f027..e728c7fccc4d 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,6 +6,8 @@
  * Modified by George Anzinger to reuse immediately and to use
  * find bit instructions. Also removed _irq on spinlocks.
  *
+ * Modified by Nadia Derbey to make it RCU safe.
+ *
  * Small id to pointer translation service.
  *
  * It uses a radix tree like structure as a sparse array indexed
@@ -35,7 +37,7 @@
 
 static struct kmem_cache *idr_layer_cache;
 
-static struct idr_layer *alloc_layer(struct idr *idp)
+static struct idr_layer *get_from_free_list(struct idr *idp)
 {
 	struct idr_layer *p;
 	unsigned long flags;
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp)
 	return(p);
 }
 
+static void idr_layer_rcu_free(struct rcu_head *head)
+{
+	struct idr_layer *layer;
+
+	layer = container_of(head, struct idr_layer, rcu_head);
+	kmem_cache_free(idr_layer_cache, layer);
+}
+
+static inline void free_layer(struct idr_layer *p)
+{
+	call_rcu(&p->rcu_head, idr_layer_rcu_free);
+}
+
 /* only called when idp->lock is held */
-static void __free_layer(struct idr *idp, struct idr_layer *p)
+static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	p->ary[0] = idp->id_free;
 	idp->id_free = p;
 	idp->id_free_cnt++;
 }
 
-static void free_layer(struct idr *idp, struct idr_layer *p)
+static void move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
 	unsigned long flags;
 
@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
 	 * Depends on the return element being zeroed.
 	 */
 	spin_lock_irqsave(&idp->lock, flags);
-	__free_layer(idp, p);
+	__move_to_free_list(idp, p);
 	spin_unlock_irqrestore(&idp->lock, flags);
 }
 
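
Freeing now happens in two distinct ways: layers parked on the per-idr freelist are plain memory (move_to_free_list()/get_from_free_list(), under idp->lock), while layers that were visible in the tree go through free_layer(), which defers the kmem_cache_free() past an RCU grace period so lockless readers never see a layer vanish mid-walk. A minimal sketch of the deferral pattern, assuming the rcu_head member this series adds to struct idr_layer in include/linux/idr.h:

	struct idr_layer {
		unsigned long		 bitmap;	/* A zero bit means "space here" */
		struct idr_layer	*ary[1<<IDR_BITS];
		int			 count;		/* When zero, we can release it */
		struct rcu_head		 rcu_head;	/* links the layer into RCU's queue */
	};

	/* Unlink the layer first, then let RCU reclaim it: readers that
	 * already hold the pointer stay safe until the grace period ends. */
	rcu_assign_pointer(parent->ary[n], NULL);
	call_rcu(&layer->rcu_head, idr_layer_rcu_free);
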
@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
  * @gfp_mask:	memory allocation flags
  *
  * This function should be called prior to locking and calling the
- * following function.  It preallocates enough memory to satisfy
+ * idr_get_new* functions. It preallocates enough memory to satisfy
  * the worst possible allocation.
  *
  * If the system is REALLY out of memory this function returns 0,
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
 		if (new == NULL)
 			return (0);
-		free_layer(idp, new);
+		move_to_free_list(idp, new);
 	}
 	return 1;
 }
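
idr_pre_get() keeps its established calling convention: preallocate outside the lock, then retry on -EAGAIN, since a concurrent allocation may have drained the freelist in between. A sketch of that loop (my_idr, my_lock and my_ptr are placeholders):

	int id, err;

retry:
	if (!idr_pre_get(&my_idr, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&my_lock);
	err = idr_get_new(&my_idr, my_ptr, &id);
	spin_unlock(&my_lock);
	if (err == -EAGAIN)
		goto retry;	/* a racer consumed our preallocated layers */
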
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			/* if already at the top layer, we need to grow */
 			if (!(p = pa[l])) {
 				*starting_id = id;
-				return -2;
+				return IDR_NEED_TO_GROW;
 			}
 
 			/* If we need to go up one layer, continue the
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			id = ((id >> sh) ^ n ^ m) << sh;
 		}
 		if ((id >= MAX_ID_BIT) || (id < 0))
-			return -3;
+			return IDR_NOMORE_SPACE;
 		if (l == 0)
 			break;
 		/*
 		 * Create the layer below if it is missing.
 		 */
 		if (!p->ary[m]) {
-			if (!(new = alloc_layer(idp)))
+			new = get_from_free_list(idp);
+			if (!new)
 				return -1;
-			p->ary[m] = new;
+			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
 		pa[l--] = p;
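
Replacing the plain store with rcu_assign_pointer() is what makes the tree safe to grow under readers: it orders the initialization of the new layer before its publication, pairing with the rcu_dereference() loads on the lookup side. Schematically (a sketch, not code from this patch; walk() is a placeholder):

	/* writer: layer is fully initialized before the store below */
	rcu_assign_pointer(p->ary[m], new);	/* implies a write barrier + store */

	/* reader: the matching load */
	q = rcu_dereference(p->ary[m]);		/* load + dependency ordering */
	if (q)
		walk(q);			/* q's contents are initialized here */
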
@@ -192,7 +208,7 @@ build_up:
 	p = idp->top;
 	layers = idp->layers;
 	if (unlikely(!p)) {
-		if (!(p = alloc_layer(idp)))
+		if (!(p = get_from_free_list(idp)))
 			return -1;
 		layers = 1;
 	}
@@ -204,7 +220,7 @@
 		layers++;
 		if (!p->count)
 			continue;
-		if (!(new = alloc_layer(idp))) {
+		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed.  If we built part of
 			 * the structure tear it down.
@@ -214,7 +230,7 @@
 				p = p->ary[0];
 				new->ary[0] = NULL;
 				new->bitmap = new->count = 0;
-				__free_layer(idp, new);
+				__move_to_free_list(idp, new);
 			}
 			spin_unlock_irqrestore(&idp->lock, flags);
 			return -1;
@@ -225,10 +241,10 @@ build_up:
 		__set_bit(0, &new->bitmap);
 		p = new;
 	}
-	idp->top = p;
+	rcu_assign_pointer(idp->top, p);
 	idp->layers = layers;
 	v = sub_alloc(idp, &id, pa);
-	if (v == -2)
+	if (v == IDR_NEED_TO_GROW)
 		goto build_up;
 	return(v);
 }
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
 		 * Successfully found an empty slot.  Install the user
 		 * pointer and mark the slot full.
 		 */
-		pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
+		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
+				(struct idr_layer *)ptr);
 		pa[0]->count++;
 		idr_mark_full(pa, id);
 	}
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	 * This is a cheap hack until the IDR code can be fixed to
 	 * return proper error values.
 	 */
-	if (rv < 0) {
-		if (rv == -1)
-			return -EAGAIN;
-		else /* Will be -3 */
-			return -ENOSPC;
-	}
+	if (rv < 0)
+		return _idr_rc_to_errno(rv);
 	*id = rv;
 	return 0;
 }
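
The open-coded translation of the allocator's internal return codes now lives in a single helper, _idr_rc_to_errno(), defined in include/linux/idr.h; presumably something along the lines of:

	/* -1 means "run idr_pre_get() and retry"; the only other negative
	 * code is IDR_NOMORE_SPACE, i.e. the id space is exhausted. */
	#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)
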
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id)
 	 * This is a cheap hack until the IDR code can be fixed to
 	 * return proper error values.
 	 */
-	if (rv < 0) {
-		if (rv == -1)
-			return -EAGAIN;
-		else /* Will be -3 */
-			return -ENOSPC;
-	}
+	if (rv < 0)
+		return _idr_rc_to_errno(rv);
 	*id = rv;
 	return 0;
 }
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new);
 
 static void idr_remove_warning(int id)
 {
-	printk("idr_remove called for id=%d which is not allocated.\n", id);
+	printk(KERN_WARNING
+		"idr_remove called for id=%d which is not allocated.\n", id);
 	dump_stack();
 }
 
@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	struct idr_layer *p = idp->top;
 	struct idr_layer **pa[MAX_LEVEL];
 	struct idr_layer ***paa = &pa[0];
+	struct idr_layer *to_free;
 	int n;
 
 	*paa = NULL;
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	n = id & IDR_MASK;
 	if (likely(p != NULL && test_bit(n, &p->bitmap))){
 		__clear_bit(n, &p->bitmap);
-		p->ary[n] = NULL;
+		rcu_assign_pointer(p->ary[n], NULL);
+		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
-			free_layer(idp, **paa);
+			if (to_free)
+				free_layer(to_free);
+			to_free = **paa;
 			**paa-- = NULL;
 		}
 		if (!*paa)
 			idp->layers = 0;
+		if (to_free)
+			free_layer(to_free);
 	} else
 		idr_remove_warning(id);
 }
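
The to_free shuffle delays each free_layer() by one loop iteration, so a layer is queued for RCU reclaim only after **paa-- = NULL has unlinked it from its parent. Queuing it while still reachable would let a reader that starts after the call_rcu() fetch the stale pointer and outlive the grace period. The invariant, schematically (parent_slot and layer are placeholders):

	rcu_assign_pointer(parent_slot, NULL);	/* 1: unreachable for new readers */
	free_layer(layer);			/* 2: only then start the grace period */
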
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
 void idr_remove(struct idr *idp, int id)
 {
 	struct idr_layer *p;
+	struct idr_layer *to_free;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
-	    idp->top->ary[0]) {  // We can drop a layer
-
+	    idp->top->ary[0]) {
+		/*
+		 * Single child at leftmost slot: we can shrink the tree.
+		 * This level is not needed anymore since when layers are
+		 * inserted, they are inserted at the top of the existing
+		 * tree.
+		 */
+		to_free = idp->top;
 		p = idp->top->ary[0];
-		idp->top->bitmap = idp->top->count = 0;
-		free_layer(idp, idp->top);
-		idp->top = p;
+		rcu_assign_pointer(idp->top, p);
 		--idp->layers;
+		to_free->bitmap = to_free->count = 0;
+		free_layer(to_free);
 	}
 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
-		p = alloc_layer(idp);
+		p = get_from_free_list(idp);
+		/*
+		 * Note: we don't call the rcu callback here, since the only
+		 * layers that fall into the freelist are those that have been
+		 * preallocated.
+		 */
 		kmem_cache_free(idr_layer_cache, p);
 	}
 	return;
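
Shrinking the top follows the same unlink-then-free discipline: the new root is published with rcu_assign_pointer() before the old one goes to free_layer(), so a reader that loaded the old idp->top can still descend through its ary[0]. Callers inherit the symmetric duty for their own objects: after idr_remove(), the structure the id mapped to must itself survive a grace period before being freed. A hedged caller-side sketch (my_obj, its rcu member and my_obj_free() are placeholders):

	spin_lock(&my_lock);
	obj = idr_find(&my_idr, id);
	idr_remove(&my_idr, id);
	spin_unlock(&my_lock);
	if (obj)
		call_rcu(&obj->rcu, my_obj_free);	/* not kfree(obj) directly */
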
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp)
 
 		id += 1 << n;
 		while (n < fls(id)) {
-			if (p) {
-				memset(p, 0, sizeof *p);
-				free_layer(idp, p);
-			}
+			if (p)
+				free_layer(p);
 			n += IDR_BITS;
 			p = *--paa;
 		}
 	}
-	idp->top = NULL;
+	rcu_assign_pointer(idp->top, NULL);
 	idp->layers = 0;
 }
 EXPORT_SYMBOL(idr_remove_all);
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all);
 void idr_destroy(struct idr *idp)
 {
 	while (idp->id_free_cnt) {
-		struct idr_layer *p = alloc_layer(idp);
+		struct idr_layer *p = get_from_free_list(idp);
 		kmem_cache_free(idr_layer_cache, p);
 	}
 }
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy);
  * return indicates that @id is not valid or you passed %NULL in
  * idr_get_new().
  *
- * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
  */
 void *idr_find(struct idr *idp, int id)
 {
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id)
 	struct idr_layer *p;
 
 	n = idp->layers * IDR_BITS;
-	p = idp->top;
+	p = rcu_dereference(idp->top);
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
-		p = p->ary[(id >> n) & IDR_MASK];
+		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
 }
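
With the top pointer and every ary[] link read through rcu_dereference(), and layers reclaimed only via call_rcu(), the lookup needs no lock at all. A minimal reader, assuming the caller manages the leaf object's lifetime as the updated comment requires (my_idr, struct my_obj and use_obj() are placeholders):

	struct my_obj *obj;

	rcu_read_lock();
	obj = idr_find(&my_idr, id);
	if (obj)
		use_obj(obj);	/* obj is only pinned inside the read section */
	rcu_read_unlock();
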
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp,
 	struct idr_layer **paa = &pa[0];
 
 	n = idp->layers * IDR_BITS;
-	p = idp->top;
+	p = rcu_dereference(idp->top);
 	max = 1 << n;
 
 	id = 0;
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp,
 		while (n > 0 && p) {
 			n -= IDR_BITS;
 			*paa++ = p;
-			p = p->ary[(id >> n) & IDR_MASK];
+			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 		}
 
 		if (p) {
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
- * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
@@ -574,13 +601,13 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 		return ERR_PTR(-ENOENT);
 
 	old_p = p->ary[n];
-	p->ary[n] = ptr;
+	rcu_assign_pointer(p->ary[n], ptr);
 
 	return old_p;
 }
 EXPORT_SYMBOL(idr_replace);
 
-static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
+static void idr_cache_ctor(void *idr_layer)
 {
 	memset(idr_layer, 0, sizeof(struct idr_layer));
 }
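
idr_replace() still demands serialization against other writers, but the rcu_assign_pointer() store lets it run concurrently with lockless readers, who observe either the old or the new pointer, never a torn value. The displaced object may still be in use by such readers, so its release must also be deferred; a sketch under the same placeholder names as above:

	spin_lock(&my_lock);
	old = idr_replace(&my_idr, new_obj, id);
	spin_unlock(&my_lock);
	if (!IS_ERR(old) && old)
		call_rcu(&old->rcu, my_obj_free);	/* readers may still hold old */
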
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
  restart:
 	/* get vacant slot */
 	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
-	if (t < 0) {
-		if (t == -1)
-			return -EAGAIN;
-		else /* will be -3 */
-			return -ENOSPC;
-	}
+	if (t < 0)
+		return _idr_rc_to_errno(t);
 
 	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
 		return -ENOSPC;
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 			return -EAGAIN;
 
 		memset(bitmap, 0, sizeof(struct ida_bitmap));
-		pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
+				(void *)bitmap);
 		pa[0]->count++;
 	}
 
@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 	 * allocation.
 	 */
 	if (ida->idr.id_free_cnt || ida->free_bitmap) {
-		struct idr_layer *p = alloc_layer(&ida->idr);
+		struct idr_layer *p = get_from_free_list(&ida->idr);
 		if (p)
 			kmem_cache_free(idr_layer_cache, p);
 	}
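
The IDA side keeps the same preallocate-and-retry convention as the IDR proper, with the bitmap handling hidden behind it. A sketch (my_ida and my_lock are placeholders):

	int id, err;

retry:
	if (!ida_pre_get(&my_ida, GFP_KERNEL))
		return -ENOMEM;
	spin_lock(&my_lock);
	err = ida_get_new(&my_ida, &id);
	spin_unlock(&my_lock);
	if (err == -EAGAIN)
		goto retry;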