author    Matthew Wilcox <willy@infradead.org>  2018-07-04 15:42:46 -0400
committer Matthew Wilcox <willy@infradead.org>  2018-10-21 10:46:33 -0400
commit    f32f004cddf86d63a9c42994bbce9f4e2f07c9fa (patch)
tree      ecd72e417a02e78fc70622c9eb1a889044138e10 /lib
parent    371c752dc66948714ee3b66c3306f3ff1ff71d2e (diff)
ida: Convert to XArray
Use the XA_TRACK_FREE ability to track which entries have a free bit,
similarly to how it uses the radix tree's IDR_FREE tag.  This eliminates
the per-cpu ida_bitmap preload, and fixes the memory consumption
regression I introduced when making the IDR able to store any pointer.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
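For context, a minimal sketch (not part of the patch) of the caller-visible
IDA API, which this conversion leaves unchanged; the example_* names are
hypothetical and error handling is elided:

	static DEFINE_IDA(example_ida);

	static int example_get_id(void)
	{
		/* Lowest free ID in [0, 63]; the IDA does its own locking. */
		return ida_alloc_range(&example_ida, 0, 63, GFP_KERNEL);
	}

	static void example_put_id(int id)
	{
		ida_free(&example_ida, id);	/* return the ID to the pool */
	}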
Diffstat (limited to 'lib')
-rw-r--r--  lib/idr.c         | 379
-rw-r--r--  lib/radix-tree.c  |  71
2 files changed, 203 insertions(+), 247 deletions(-)
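Both the old and new code share one scheme the diff below relies on: an ID is
split into an array index and a bit offset, one 128-byte bitmap (1024 bits)
per IDA_BITMAP_BITS IDs.  A sketch of the arithmetic, for illustration only:

	/* Illustrative: where ID 'id' lives in the tree of bitmaps. */
	unsigned long index = id / IDA_BITMAP_BITS;	/* which bitmap entry */
	unsigned int bit = id % IDA_BITMAP_BITS;	/* bit inside that bitmap */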
diff --git a/lib/idr.c b/lib/idr.c
index 9a366b5be2c2..3c20ad9b0463 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,8 +6,6 @@
 #include <linux/spinlock.h>
 #include <linux/xarray.h>
 
-DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
-
 /**
  * idr_alloc_u32() - Allocate an ID.
  * @idr: IDR handle.
@@ -320,6 +318,9 @@ EXPORT_SYMBOL(idr_replace);
  * free the individual IDs in it.  You can use ida_is_empty() to find
  * out whether the IDA has any IDs currently allocated.
  *
+ * The IDA handles its own locking.  It is safe to call any of the IDA
+ * functions without synchronisation in your code.
+ *
  * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
  * limitation, it should be quite straightforward to raise the maximum.
  */
@@ -327,151 +328,197 @@ EXPORT_SYMBOL(idr_replace);
 /*
  * Developer's notes:
  *
- * The IDA uses the functionality provided by the IDR & radix tree to store
- * bitmaps in each entry.  The IDR_FREE tag means there is at least one bit
- * free, unlike the IDR where it means at least one entry is free.
+ * The IDA uses the functionality provided by the XArray to store bitmaps in
+ * each entry.  The XA_FREE_MARK is only cleared when all bits in the bitmap
+ * have been set.
  *
- * I considered telling the radix tree that each slot is an order-10 node
- * and storing the bit numbers in the radix tree, but the radix tree can't
- * allow a single multiorder entry at index 0, which would significantly
- * increase memory consumption for the IDA.  So instead we divide the index
- * by the number of bits in the leaf bitmap before doing a radix tree lookup.
+ * I considered telling the XArray that each slot is an order-10 node
+ * and indexing by bit number, but the XArray can't allow a single multi-index
+ * entry in the head, which would significantly increase memory consumption
+ * for the IDA.  So instead we divide the index by the number of bits in the
+ * leaf bitmap before doing a radix tree lookup.
  *
  * As an optimisation, if there are only a few low bits set in any given
  * leaf, instead of allocating a 128-byte bitmap, we store the bits
- * directly in the entry.
- *
- * We allow the radix tree 'exceptional' count to get out of date.  Nothing
- * in the IDA nor the radix tree code checks it.  If it becomes important
- * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
- * calls to radix_tree_iter_replace() which will correct the exceptional
- * count.
- *
- * The IDA always requires a lock to alloc/free.  If we add a 'test_bit'
+ * as a value entry.  Value entries never have the XA_FREE_MARK cleared
+ * because we can always convert them into a bitmap entry.
+ *
+ * It would be possible to optimise further; once we've run out of a
+ * single 128-byte bitmap, we currently switch to a 576-byte node, put
+ * the 128-byte bitmap in the first entry and then start allocating extra
+ * 128-byte entries.  We could instead use the 512 bytes of the node's
+ * data as a bitmap before moving to that scheme.  I do not believe this
+ * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
+ * users of the IDA and almost none of them use more than 1024 entries.
+ * Those that do use more than the 8192 IDs that the 512 bytes would
+ * provide.
+ *
+ * The IDA always uses a lock to alloc/free.  If we add a 'test_bit'
  * equivalent, it will still need locking.  Going to RCU lookup would require
  * using RCU to free bitmaps, and that's not trivial without embedding an
  * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
  * bitmap, which is excessive.
  */
 
-#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
-
-static int ida_get_new_above(struct ida *ida, int start)
+/**
+ * ida_alloc_range() - Allocate an unused ID.
+ * @ida: IDA handle.
+ * @min: Lowest ID to allocate.
+ * @max: Highest ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Allocate an ID between @min and @max, inclusive.  The allocated ID will
+ * not exceed %INT_MAX, even if @max is larger.
+ *
+ * Context: Any context.
+ * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
+ * or %-ENOSPC if there are no free IDs.
+ */
+int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
+			gfp_t gfp)
 {
-	struct radix_tree_root *root = &ida->ida_rt;
-	void __rcu **slot;
-	struct radix_tree_iter iter;
-	struct ida_bitmap *bitmap;
-	unsigned long index;
-	unsigned bit;
-	int new;
-
-	index = start / IDA_BITMAP_BITS;
-	bit = start % IDA_BITMAP_BITS;
-
-	slot = radix_tree_iter_init(&iter, index);
-	for (;;) {
-		if (slot)
-			slot = radix_tree_next_slot(slot, &iter,
-						RADIX_TREE_ITER_TAGGED);
-		if (!slot) {
-			slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
-			if (IS_ERR(slot)) {
-				if (slot == ERR_PTR(-ENOMEM))
-					return -EAGAIN;
-				return PTR_ERR(slot);
-			}
-		}
-		if (iter.index > index)
-			bit = 0;
-		new = iter.index * IDA_BITMAP_BITS;
-		bitmap = rcu_dereference_raw(*slot);
-		if (xa_is_value(bitmap)) {
-			unsigned long tmp = xa_to_value(bitmap);
-			int vbit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE,
-							bit);
-			if (vbit < BITS_PER_XA_VALUE) {
-				tmp |= 1UL << vbit;
-				rcu_assign_pointer(*slot, xa_mk_value(tmp));
-				return new + vbit;
-			}
-			bitmap = this_cpu_xchg(ida_bitmap, NULL);
-			if (!bitmap)
-				return -EAGAIN;
-			bitmap->bitmap[0] = tmp;
-			rcu_assign_pointer(*slot, bitmap);
-		}
-
-		if (bitmap) {
-			bit = find_next_zero_bit(bitmap->bitmap,
-							IDA_BITMAP_BITS, bit);
-			new += bit;
-			if (new < 0)
-				return -ENOSPC;
-			if (bit == IDA_BITMAP_BITS)
-				continue;
-
-			__set_bit(bit, bitmap->bitmap);
-			if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
-				radix_tree_iter_tag_clear(root, &iter,
-								IDR_FREE);
-		} else {
-			new += bit;
-			if (new < 0)
-				return -ENOSPC;
-			if (bit < BITS_PER_XA_VALUE) {
-				bitmap = xa_mk_value(1UL << bit);
-			} else {
-				bitmap = this_cpu_xchg(ida_bitmap, NULL);
-				if (!bitmap)
-					return -EAGAIN;
-				__set_bit(bit, bitmap->bitmap);
-			}
-			radix_tree_iter_replace(root, &iter, slot, bitmap);
-		}
-
-		return new;
-	}
+	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
+	unsigned bit = min % IDA_BITMAP_BITS;
+	unsigned long flags;
+	struct ida_bitmap *bitmap, *alloc = NULL;
+
+	if ((int)min < 0)
+		return -ENOSPC;
+
+	if ((int)max < 0)
+		max = INT_MAX;
+
+retry:
+	xas_lock_irqsave(&xas, flags);
+next:
+	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
+	if (xas.xa_index > min / IDA_BITMAP_BITS)
+		bit = 0;
+	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+		goto nospc;
+
+	if (xa_is_value(bitmap)) {
+		unsigned long tmp = xa_to_value(bitmap);
+
+		if (bit < BITS_PER_XA_VALUE) {
+			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
+			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+				goto nospc;
+			if (bit < BITS_PER_XA_VALUE) {
+				tmp |= 1UL << bit;
+				xas_store(&xas, xa_mk_value(tmp));
+				goto out;
+			}
+		}
+		bitmap = alloc;
+		if (!bitmap)
+			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+		if (!bitmap)
+			goto alloc;
+		bitmap->bitmap[0] = tmp;
+		xas_store(&xas, bitmap);
+		if (xas_error(&xas)) {
+			bitmap->bitmap[0] = 0;
+			goto out;
+		}
+	}
+
+	if (bitmap) {
+		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
+		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
+			goto nospc;
+		if (bit == IDA_BITMAP_BITS)
+			goto next;
+
+		__set_bit(bit, bitmap->bitmap);
+		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
+			xas_clear_mark(&xas, XA_FREE_MARK);
+	} else {
+		if (bit < BITS_PER_XA_VALUE) {
+			bitmap = xa_mk_value(1UL << bit);
+		} else {
+			bitmap = alloc;
+			if (!bitmap)
+				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
+			if (!bitmap)
+				goto alloc;
+			__set_bit(bit, bitmap->bitmap);
+		}
+		xas_store(&xas, bitmap);
+	}
+out:
+	xas_unlock_irqrestore(&xas, flags);
+	if (xas_nomem(&xas, gfp)) {
+		xas.xa_index = min / IDA_BITMAP_BITS;
+		bit = min % IDA_BITMAP_BITS;
+		goto retry;
+	}
+	if (bitmap != alloc)
+		kfree(alloc);
+	if (xas_error(&xas))
+		return xas_error(&xas);
+	return xas.xa_index * IDA_BITMAP_BITS + bit;
+alloc:
+	xas_unlock_irqrestore(&xas, flags);
+	alloc = kzalloc(sizeof(*bitmap), gfp);
+	if (!alloc)
+		return -ENOMEM;
+	xas_set(&xas, min / IDA_BITMAP_BITS);
+	bit = min % IDA_BITMAP_BITS;
+	goto retry;
+nospc:
+	xas_unlock_irqrestore(&xas, flags);
+	return -ENOSPC;
 }
+EXPORT_SYMBOL(ida_alloc_range);
 
-static void ida_remove(struct ida *ida, int id)
+/**
+ * ida_free() - Release an allocated ID.
+ * @ida: IDA handle.
+ * @id: Previously allocated ID.
+ *
+ * Context: Any context.
+ */
+void ida_free(struct ida *ida, unsigned int id)
 {
-	unsigned long index = id / IDA_BITMAP_BITS;
-	unsigned offset = id % IDA_BITMAP_BITS;
-	struct ida_bitmap *bitmap;
-	unsigned long *btmp;
-	struct radix_tree_iter iter;
-	void __rcu **slot;
-
-	slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
-	if (!slot)
-		goto err;
-
-	bitmap = rcu_dereference_raw(*slot);
-	if (xa_is_value(bitmap)) {
-		btmp = (unsigned long *)slot;
-		offset += 1; /* Intimate knowledge of the value encoding */
-		if (offset >= BITS_PER_LONG)
-			goto err;
-	} else {
-		btmp = bitmap->bitmap;
-	}
-	if (!test_bit(offset, btmp))
-		goto err;
-
-	__clear_bit(offset, btmp);
-	radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
-	if (xa_is_value(bitmap)) {
-		if (xa_to_value(rcu_dereference_raw(*slot)) == 0)
-			radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
-	} else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
-		kfree(bitmap);
-		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
-	}
-	return;
- err:
-	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
+	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
+	unsigned bit = id % IDA_BITMAP_BITS;
+	struct ida_bitmap *bitmap;
+	unsigned long flags;
+
+	BUG_ON((int)id < 0);
+
+	xas_lock_irqsave(&xas, flags);
+	bitmap = xas_load(&xas);
+
+	if (xa_is_value(bitmap)) {
+		unsigned long v = xa_to_value(bitmap);
+		if (bit >= BITS_PER_XA_VALUE)
+			goto err;
+		if (!(v & (1UL << bit)))
+			goto err;
+		v &= ~(1UL << bit);
+		if (!v)
+			goto delete;
+		xas_store(&xas, xa_mk_value(v));
+	} else {
+		if (!test_bit(bit, bitmap->bitmap))
+			goto err;
+		__clear_bit(bit, bitmap->bitmap);
+		xas_set_mark(&xas, XA_FREE_MARK);
+		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
+			kfree(bitmap);
+delete:
+			xas_store(&xas, NULL);
+		}
+	}
+	xas_unlock_irqrestore(&xas, flags);
+	return;
+ err:
+	xas_unlock_irqrestore(&xas, flags);
+	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
 }
+EXPORT_SYMBOL(ida_free);
 
 /**
  * ida_destroy() - Free all IDs.
@@ -486,80 +533,60 @@ static void ida_remove(struct ida *ida, int id)
  */
 void ida_destroy(struct ida *ida)
 {
+	XA_STATE(xas, &ida->xa, 0);
+	struct ida_bitmap *bitmap;
 	unsigned long flags;
-	struct radix_tree_iter iter;
-	void __rcu **slot;
 
-	xa_lock_irqsave(&ida->ida_rt, flags);
-	radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
-		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
+	xas_lock_irqsave(&xas, flags);
+	xas_for_each(&xas, bitmap, ULONG_MAX) {
 		if (!xa_is_value(bitmap))
 			kfree(bitmap);
-		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
+		xas_store(&xas, NULL);
 	}
-	xa_unlock_irqrestore(&ida->ida_rt, flags);
+	xas_unlock_irqrestore(&xas, flags);
 }
 EXPORT_SYMBOL(ida_destroy);
 
-/**
- * ida_alloc_range() - Allocate an unused ID.
- * @ida: IDA handle.
- * @min: Lowest ID to allocate.
- * @max: Highest ID to allocate.
- * @gfp: Memory allocation flags.
- *
- * Allocate an ID between @min and @max, inclusive.  The allocated ID will
- * not exceed %INT_MAX, even if @max is larger.
- *
- * Context: Any context.
- * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
- * or %-ENOSPC if there are no free IDs.
- */
-int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
-			gfp_t gfp)
-{
-	int id = 0;
-	unsigned long flags;
-
-	if ((int)min < 0)
-		return -ENOSPC;
-
-	if ((int)max < 0)
-		max = INT_MAX;
-
-again:
-	xa_lock_irqsave(&ida->ida_rt, flags);
-	id = ida_get_new_above(ida, min);
-	if (id > (int)max) {
-		ida_remove(ida, id);
-		id = -ENOSPC;
-	}
-	xa_unlock_irqrestore(&ida->ida_rt, flags);
-
-	if (unlikely(id == -EAGAIN)) {
-		if (!ida_pre_get(ida, gfp))
-			return -ENOMEM;
-		goto again;
-	}
-
-	return id;
-}
-EXPORT_SYMBOL(ida_alloc_range);
-
-/**
- * ida_free() - Release an allocated ID.
- * @ida: IDA handle.
- * @id: Previously allocated ID.
- *
- * Context: Any context.
- */
-void ida_free(struct ida *ida, unsigned int id)
-{
-	unsigned long flags;
-
-	BUG_ON((int)id < 0);
-	xa_lock_irqsave(&ida->ida_rt, flags);
-	ida_remove(ida, id);
-	xa_unlock_irqrestore(&ida->ida_rt, flags);
-}
-EXPORT_SYMBOL(ida_free);
+#ifndef __KERNEL__
+extern void xa_dump_index(unsigned long index, unsigned int shift);
+#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)
+
+static void ida_dump_entry(void *entry, unsigned long index)
+{
+	unsigned long i;
+
+	if (!entry)
+		return;
+
+	if (xa_is_node(entry)) {
+		struct xa_node *node = xa_to_node(entry);
+		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
+			XA_CHUNK_SHIFT;
+
+		xa_dump_index(index * IDA_BITMAP_BITS, shift);
+		xa_dump_node(node);
+		for (i = 0; i < XA_CHUNK_SIZE; i++)
+			ida_dump_entry(node->slots[i],
+					index | (i << node->shift));
+	} else if (xa_is_value(entry)) {
+		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
+		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
+	} else {
+		struct ida_bitmap *bitmap = entry;
+
+		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
+		pr_cont("bitmap: %p data", bitmap);
+		for (i = 0; i < IDA_BITMAP_LONGS; i++)
+			pr_cont(" %lx", bitmap->bitmap[i]);
+		pr_cont("\n");
+	}
+}
+
+static void ida_dump(struct ida *ida)
+{
+	struct xarray *xa = &ida->xa;
+	pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
+			xa->xa_flags >> ROOT_TAG_SHIFT);
+	ida_dump_entry(xa->xa_head, 0);
+}
+#endif
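A sketch (not part of the patch) of the value-entry fast path ida_alloc_range()
now takes for low IDs: small bitmaps are stored directly in the entry via
xa_mk_value(), so only BITS_PER_XA_VALUE bits (one fewer than BITS_PER_LONG,
the tag bit) are usable before converting to a full struct ida_bitmap.  The
helper name is hypothetical:

	static bool example_alloc_in_value(struct xa_state *xas, void *entry,
					   unsigned int bit)
	{
		unsigned long tmp;

		if (!xa_is_value(entry) || bit >= BITS_PER_XA_VALUE)
			return false;
		tmp = xa_to_value(entry);
		bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
		if (bit >= BITS_PER_XA_VALUE)
			return false;	/* full: caller converts to a bitmap */
		xas_store(xas, xa_mk_value(tmp | (1UL << bit)));
		return true;
	}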
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3479d93c32b9..68702061464f 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -255,54 +255,6 @@ static unsigned long next_index(unsigned long index,
 	return (index & ~node_maxindex(node)) + (offset << node->shift);
 }
 
-#ifndef __KERNEL__
-static void dump_ida_node(void *entry, unsigned long index)
-{
-	unsigned long i;
-
-	if (!entry)
-		return;
-
-	if (radix_tree_is_internal_node(entry)) {
-		struct radix_tree_node *node = entry_to_node(entry);
-
-		pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
-			node, node->offset, index * IDA_BITMAP_BITS,
-			((index | node_maxindex(node)) + 1) *
-				IDA_BITMAP_BITS - 1,
-			node->parent, node->tags[0][0], node->shift,
-			node->count);
-		for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
-			dump_ida_node(node->slots[i],
-					index | (i << node->shift));
-	} else if (xa_is_value(entry)) {
-		pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
-				entry, (int)(index & RADIX_TREE_MAP_MASK),
-				index * IDA_BITMAP_BITS,
-				index * IDA_BITMAP_BITS + BITS_PER_XA_VALUE,
-				xa_to_value(entry));
-	} else {
-		struct ida_bitmap *bitmap = entry;
-
-		pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
-				(int)(index & RADIX_TREE_MAP_MASK),
-				index * IDA_BITMAP_BITS,
-				(index + 1) * IDA_BITMAP_BITS - 1);
-		for (i = 0; i < IDA_BITMAP_LONGS; i++)
-			pr_cont(" %lx", bitmap->bitmap[i]);
-		pr_cont("\n");
-	}
-}
-
-static void ida_dump(struct ida *ida)
-{
-	struct radix_tree_root *root = &ida->ida_rt;
-	pr_debug("ida: %p node %p free %d\n", ida, root->xa_head,
-			root->xa_flags >> ROOT_TAG_SHIFT);
-	dump_ida_node(root->xa_head, 0);
-}
-#endif
-
 /*
  * This assumes that the caller has performed appropriate preallocation, and
  * that the caller has pinned this thread of control to the current CPU.
@@ -2039,27 +1991,6 @@ void idr_preload(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(idr_preload);
 
-int ida_pre_get(struct ida *ida, gfp_t gfp)
-{
-	/*
-	 * The IDA API has no preload_end() equivalent.  Instead,
-	 * ida_get_new() can return -EAGAIN, prompting the caller
-	 * to return to the ida_pre_get() step.
-	 */
-	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
-		preempt_enable();
-
-	if (!this_cpu_read(ida_bitmap)) {
-		struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
-		if (!bitmap)
-			return 0;
-		if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
-			kfree(bitmap);
-	}
-
-	return 1;
-}
-
 void __rcu **idr_get_free(struct radix_tree_root *root,
 			struct radix_tree_iter *iter, gfp_t gfp,
 			unsigned long max)
@@ -2201,8 +2132,6 @@ static int radix_tree_cpu_dead(unsigned int cpu)
 		kmem_cache_free(radix_tree_node_cachep, node);
 		rtp->nr--;
 	}
-	kfree(per_cpu(ida_bitmap, cpu));
-	per_cpu(ida_bitmap, cpu) = NULL;
 	return 0;
 }
 
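With ida_pre_get() gone, memory for new nodes and bitmaps comes from the
xas_nomem() retry loop visible in ida_alloc_range() above.  A condensed
sketch of that idiom (illustrative only; the real function retries via its
retry label rather than a literal do/while):

	do {
		xas_lock_irqsave(&xas, flags);
		/* ... attempt the store; xas_store() may record -ENOMEM ... */
		xas_unlock_irqrestore(&xas, flags);
	} while (xas_nomem(&xas, gfp));	/* allocates outside the lock, retries */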