path: root/mm
author      Joonsoo Kim <js1304@gmail.com>       2012-08-15 11:02:40 -0400
committer   Pekka Enberg <penberg@kernel.org>    2012-10-19 03:19:24 -0400
commit      837d678dc264c797c16f81cf56f615f7544891c1 (patch)
tree        9ed759efc6bb0e404a6e4faf576f50b4641011a3 /mm
parent      ddffeb8c4d0331609ef2581d84de4d763607bd37 (diff)
slub: remove one code path and reduce lock contention in __slab_free()
When we try to free an object, there are cases where we need to take the node lock; this is a necessary step for preventing a race. After taking the lock we try cmpxchg_double_slab(), but there is a possible scenario in which cmpxchg_double_slab() fails even though we took the lock. The following example illustrates it:

    CPU A               CPU B
    need lock
    ...                 need lock
    ...                 lock!!
    lock..but spin      free success
    spin...             unlock
    lock!!
    free fail

In this case CPU A retries while still holding the lock. For CPU A, "release the lock first, and re-take it if necessary" is the preferable way. There are two reasons for this.

First, it makes __slab_free()'s logic simpler. With this patch, the 'was_frozen = 1' case is always handled without taking the lock, so we can remove one code path.

Second, it may reduce lock contention. By the time we retry, the status of the slab has already changed, so in almost every case we no longer need the lock; the "release the lock first, and re-take it if necessary" policy helps with this.

Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
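The core of the change is the retry-loop discipline rather than any single line of the diff. Below is a minimal user-space sketch of the "release the lock first, and re-take it if necessary" pattern; node_lock, slab_state, needs_list_lock() and free_one_object() are made-up stand-ins for illustration only, with a pthread mutex and a C11 atomic standing in for the node list_lock and cmpxchg_double_slab() -- this is not the kernel code itself.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative stand-ins only -- not kernel structures or APIs. */
    static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic unsigned long slab_state;   /* stands in for freelist/counters */

    /* Placeholder for the "(!new.inuse || !prior) && !was_frozen" style test. */
    static bool needs_list_lock(unsigned long new_state)
    {
            return (new_state & 1) == 0;
    }

    static void free_one_object(unsigned long delta)
    {
            unsigned long old_state, new_state;
            bool locked = false;

            do {
                    /*
                     * Policy from the patch: drop any lock taken on the
                     * previous iteration before re-reading state, and
                     * re-take it only if this iteration still needs it.
                     */
                    if (locked) {
                            pthread_mutex_unlock(&node_lock);
                            locked = false;
                    }

                    old_state = atomic_load(&slab_state);
                    new_state = old_state - delta;

                    if (needs_list_lock(new_state)) {
                            pthread_mutex_lock(&node_lock);
                            locked = true;
                    }
            } while (!atomic_compare_exchange_weak(&slab_state,
                                                   &old_state, new_state));

            /* List manipulation that requires node_lock would happen here. */
            if (locked)
                    pthread_mutex_unlock(&node_lock);
    }

As in the patched __slab_free(), a retry whose re-read state no longer needs the lock never holds it across the compare-and-swap, which is where the reduced contention comes from.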
Diffstat (limited to 'mm')
-rw-r--r--   mm/slub.c   34
1 file changed, 14 insertions(+), 20 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index a0d698467f70..e7aec2001ae5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2459,7 +2459,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	void *prior;
 	void **object = (void *)x;
 	int was_frozen;
-	int inuse;
 	struct page new;
 	unsigned long counters;
 	struct kmem_cache_node *n = NULL;
@@ -2472,13 +2471,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 
 	do {
+		if (unlikely(n)) {
+			spin_unlock_irqrestore(&n->list_lock, flags);
+			n = NULL;
+		}
 		prior = page->freelist;
 		counters = page->counters;
 		set_freepointer(s, object, prior);
 		new.counters = counters;
 		was_frozen = new.frozen;
 		new.inuse--;
-		if ((!new.inuse || !prior) && !was_frozen && !n) {
+		if ((!new.inuse || !prior) && !was_frozen) {
 
 			if (!kmem_cache_debug(s) && !prior)
 
@@ -2503,7 +2506,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 			}
 		}
-		inuse = new.inuse;
 
 	} while (!cmpxchg_double_slab(s, page,
 		prior, counters,
@@ -2529,25 +2531,17 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		return;
 	}
 
+	if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+		goto slab_empty;
+
 	/*
-	 * was_frozen may have been set after we acquired the list_lock in
-	 * an earlier loop. So we need to check it here again.
+	 * Objects left in the slab. If it was not on the partial list before
+	 * then add it.
 	 */
-	if (was_frozen)
-		stat(s, FREE_FROZEN);
-	else {
-		if (unlikely(!inuse && n->nr_partial > s->min_partial))
-			goto slab_empty;
-
-		/*
-		 * Objects left in the slab. If it was not on the partial list before
-		 * then add it.
-		 */
-		if (unlikely(!prior)) {
-			remove_full(s, page);
-			add_partial(n, page, DEACTIVATE_TO_TAIL);
-			stat(s, FREE_ADD_PARTIAL);
-		}
+	if (kmem_cache_debug(s) && unlikely(!prior)) {
+		remove_full(s, page);
+		add_partial(n, page, DEACTIVATE_TO_TAIL);
+		stat(s, FREE_ADD_PARTIAL);
 	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return;