Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  34
1 file changed, 14 insertions(+), 20 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2459,7 +2459,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	void *prior;
 	void **object = (void *)x;
 	int was_frozen;
-	int inuse;
 	struct page new;
 	unsigned long counters;
 	struct kmem_cache_node *n = NULL;
@@ -2472,13 +2471,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 
 	do {
+		if (unlikely(n)) {
+			spin_unlock_irqrestore(&n->list_lock, flags);
+			n = NULL;
+		}
 		prior = page->freelist;
 		counters = page->counters;
 		set_freepointer(s, object, prior);
 		new.counters = counters;
 		was_frozen = new.frozen;
 		new.inuse--;
-		if ((!new.inuse || !prior) && !was_frozen && !n) {
+		if ((!new.inuse || !prior) && !was_frozen) {
 
 			if (!kmem_cache_debug(s) && !prior)
 
@@ -2503,7 +2506,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 			}
 		}
-		inuse = new.inuse;
 
 	} while (!cmpxchg_double_slab(s, page,
 		prior, counters,
@@ -2529,25 +2531,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 	}
 
+	if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+		goto slab_empty;
+
 	/*
-	 * was_frozen may have been set after we acquired the list_lock in
-	 * an earlier loop. So we need to check it here again.
+	 * Objects left in the slab. If it was not on the partial list before
+	 * then add it.
 	 */
-	if (was_frozen)
-		stat(s, FREE_FROZEN);
-	else {
-		if (unlikely(!inuse && n->nr_partial > s->min_partial))
-			goto slab_empty;
-
-		/*
-		 * Objects left in the slab. If it was not on the partial list before
-		 * then add it.
-		 */
-		if (unlikely(!prior)) {
-			remove_full(s, page);
-			add_partial(n, page, DEACTIVATE_TO_TAIL);
-			stat(s, FREE_ADD_PARTIAL);
-		}
+	if (kmem_cache_debug(s) && unlikely(!prior)) {
+		remove_full(s, page);
+		add_partial(n, page, DEACTIVATE_TO_TAIL);
+		stat(s, FREE_ADD_PARTIAL);
 	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
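
For readers who want the shape of the change without the surrounding SLUB machinery, the sketch below is a minimal userspace analogue of the retry pattern this patch settles on: a lock that a previous, failed iteration of the compare-and-swap loop took speculatively is released at the top of the next iteration, so the loop never retries while holding it. This is a sketch under assumptions, not SLUB code; every identifier in it (free_one, node_lock, freelist, locked) is hypothetical, with C11 atomics and a pthread mutex standing in for cmpxchg_double_slab() and the node's list_lock.

/*
 * Userspace sketch of the locking pattern in the patched __slab_free():
 * drop a speculatively taken lock before retrying the CAS.
 * All identifiers are illustrative; none of this is kernel code.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object { struct object *next; };

static _Atomic(struct object *) freelist;              /* lock-free LIFO */
static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_one(struct object *obj)
{
	struct object *prior;
	bool locked = false;    /* plays the role of 'n' in __slab_free() */

	do {
		/* Lock left over from an earlier, failed attempt: drop it. */
		if (locked) {
			pthread_mutex_unlock(&node_lock);
			locked = false;
		}

		prior = atomic_load(&freelist);
		obj->next = prior;

		/*
		 * If this free will need list maintenance, take the lock
		 * speculatively before publishing the new freelist head.
		 */
		if (!prior) {
			pthread_mutex_lock(&node_lock);
			locked = true;
		}
	} while (!atomic_compare_exchange_weak(&freelist, &prior, obj));

	if (locked) {
		/* Slow-path work that required the lock would run here. */
		pthread_mutex_unlock(&node_lock);
	}
}

int main(void)
{
	struct object a = { 0 }, b = { 0 };

	free_one(&a);   /* empty list: takes and releases node_lock */
	free_one(&b);   /* non-empty list: lock-free fast path */
	printf("head=%p\n", (void *)atomic_load(&freelist));
	return 0;
}

The analogue is loose: __slab_free() updates the page's freelist and counters together with cmpxchg_double_slab(), and the lock in question is the node's list_lock. But it illustrates why releasing the lock at the top of the retry loop lets the patch drop the "&& !n" special case from the in-loop condition and handle the empty-slab and partial-list cases in one place after the loop.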