Diffstat (limited to 'lib')
 lib/idr.c | 57 +++++++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 43 insertions(+), 14 deletions(-)
diff --git a/lib/idr.c b/lib/idr.c
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -52,6 +52,19 @@ static struct idr_layer *get_from_free_list(struct idr *idp)
 	return(p);
 }
 
+static void idr_layer_rcu_free(struct rcu_head *head)
+{
+	struct idr_layer *layer;
+
+	layer = container_of(head, struct idr_layer, rcu_head);
+	kmem_cache_free(idr_layer_cache, layer);
+}
+
+static inline void free_layer(struct idr_layer *p)
+{
+	call_rcu(&p->rcu_head, idr_layer_rcu_free);
+}
+
 /* only called when idp->lock is held */
 static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
 {
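
The new free_layer() defers the actual kmem_cache_free() through call_rcu(), so a layer that has just been unlinked stays valid for any reader still inside an RCU read-side critical section. For container_of(head, struct idr_layer, rcu_head) to resolve, struct idr_layer must carry a struct rcu_head; that field comes from the companion include/linux/idr.h change, which is not part of this diff. A rough sketch of the resulting layout, inferred from the fields the code in this patch touches (an assumption, not taken from the patch itself):

	struct idr_layer {
		unsigned long		 bitmap;	/* a zero bit means "space here" */
		struct idr_layer	*ary[1<<IDR_BITS];
		int			 count;		/* when zero, the layer can be released */
		struct rcu_head		 rcu_head;	/* lets free_layer() defer the free */
	};
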
@@ -331,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	struct idr_layer *p = idp->top;
 	struct idr_layer **pa[MAX_LEVEL];
 	struct idr_layer ***paa = &pa[0];
+	struct idr_layer *to_free;
 	int n;
 
 	*paa = NULL;
@@ -346,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
 	n = id & IDR_MASK;
 	if (likely(p != NULL && test_bit(n, &p->bitmap))){
 		__clear_bit(n, &p->bitmap);
-		p->ary[n] = NULL;
+		rcu_assign_pointer(p->ary[n], NULL);
+		to_free = NULL;
 		while(*paa && ! --((**paa)->count)){
-			move_to_free_list(idp, **paa);
+			if (to_free)
+				free_layer(to_free);
+			to_free = **paa;
 			**paa-- = NULL;
 		}
 		if (!*paa)
 			idp->layers = 0;
+		if (to_free)
+			free_layer(to_free);
 	} else
 		idr_remove_warning(id);
 }
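
The one-step delay through to_free is what keeps the RCU rules intact: a layer must already be unreachable from its parent when it is handed to free_layer(), otherwise a reader starting its read-side critical section after the call_rcu() could still fetch the stale pointer and outlive the grace period. Each emptied layer is therefore freed only on the following loop iteration, once **paa-- = NULL has severed the link to it, with a final free_layer() after the loop for the last candidate. A minimal, hypothetical illustration of the same unlink-then-defer ordering (not idr code; struct item and its callers are invented for the example):

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct item {
		int			 payload;
		struct rcu_head		 rcu_head;
	};

	static void item_rcu_free(struct rcu_head *head)
	{
		kfree(container_of(head, struct item, rcu_head));
	}

	/* Caller holds the write-side lock protecting *slot. */
	static void item_remove(struct item **slot)
	{
		struct item *victim = *slot;

		/* 1. Unpublish: readers arriving from now on cannot reach it... */
		rcu_assign_pointer(*slot, NULL);
		/* 2. ...then free only after pre-existing readers have drained. */
		call_rcu(&victim->rcu_head, item_rcu_free);
	}
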
@@ -365,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
 void idr_remove(struct idr *idp, int id)
 {
 	struct idr_layer *p;
+	struct idr_layer *to_free;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
-	    idp->top->ary[0]) {  // We can drop a layer
-
+	    idp->top->ary[0]) {
+		/*
+		 * Single child at leftmost slot: we can shrink the tree.
+		 * This level is not needed anymore since when layers are
+		 * inserted, they are inserted at the top of the existing
+		 * tree.
+		 */
+		to_free = idp->top;
 		p = idp->top->ary[0];
-		idp->top->bitmap = idp->top->count = 0;
-		move_to_free_list(idp, idp->top);
-		idp->top = p;
+		rcu_assign_pointer(idp->top, p);
 		--idp->layers;
+		to_free->bitmap = to_free->count = 0;
+		free_layer(to_free);
 	}
 	while (idp->id_free_cnt >= IDR_FREE_MAX) {
 		p = get_from_free_list(idp);
+		/*
+		 * Note: we don't call the rcu callback here, since the only
+		 * layers that fall into the freelist are those that have been
+		 * preallocated.
+		 */
 		kmem_cache_free(idr_layer_cache, p);
 	}
 	return;
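
With removals published through rcu_assign_pointer() and layer frees deferred past a grace period, the lookup path no longer needs the idr spinlock. A sketch of the intended reader side, assuming the rest of this series converts idr_find() to rcu_dereference()-based traversal (struct my_object and its refcount field are hypothetical):

	#include <linux/idr.h>
	#include <linux/rcupdate.h>

	static struct my_object *my_object_lookup(struct idr *idr, int id)
	{
		struct my_object *obj;

		rcu_read_lock();
		obj = idr_find(idr, id);
		/*
		 * The pointer is only guaranteed to stay valid until
		 * rcu_read_unlock(); take a reference inside the critical
		 * section and treat a failed bump as "already being removed".
		 */
		if (obj && !atomic_inc_not_zero(&obj->refcount))
			obj = NULL;
		rcu_read_unlock();

		return obj;
	}
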
@@ -421,15 +452,13 @@ void idr_remove_all(struct idr *idp)
 
 		id += 1 << n;
 		while (n < fls(id)) {
-			if (p) {
-				memset(p, 0, sizeof *p);
-				move_to_free_list(idp, p);
-			}
+			if (p)
+				free_layer(p);
 			n += IDR_BITS;
 			p = *--paa;
 		}
 	}
-	idp->top = NULL;
+	rcu_assign_pointer(idp->top, NULL);
 	idp->layers = 0;
 }
 EXPORT_SYMBOL(idr_remove_all);
@@ -546,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
  * A -ENOENT return indicates that @id was not found.
  * A -EINVAL return indicates that @id was not within valid constraints.
  *
- * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ * The caller must serialize with writers.
  */
 void *idr_replace(struct idr *idp, void *ptr, int id)
 {
@@ -572,7 +601,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 		return ERR_PTR(-ENOENT);
 
 	old_p = p->ary[n];
-	p->ary[n] = ptr;
+	rcu_assign_pointer(p->ary[n], ptr);
 
 	return old_p;
 }
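
Since idr_replace() now publishes the replacement with rcu_assign_pointer(), the relaxed comment above is the whole contract: writers still serialize among themselves, while concurrent RCU readers observe either the old or the new object, never a torn pointer. A hedged usage sketch (the lock, struct my_object, and my_object_rcu_free() are hypothetical, as in the earlier sketches):

	#include <linux/err.h>
	#include <linux/idr.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	static void my_object_rcu_free(struct rcu_head *head);	/* container_of + kfree */

	static int my_object_swap(struct idr *idr, spinlock_t *lock,
				  int id, struct my_object *new_obj)
	{
		struct my_object *old;

		spin_lock(lock);		/* serialize with other writers only */
		old = idr_replace(idr, new_obj, id);
		spin_unlock(lock);

		if (IS_ERR(old))
			return PTR_ERR(old);

		/* Readers may still hold 'old'; free it after a grace period. */
		call_rcu(&old->rcu_head, my_object_rcu_free);
		return 0;
	}
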