author	Christoph Lameter <cl@linux.com>	2011-08-09 17:12:22 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-08-19 12:34:25 -0400
commit	69cb8e6b7c2982e015d2b35a34ac2674c79e801c (patch)
tree	37905e4cec71f96f2008e9d13c92cf0b976b3390 /mm/slub.c
parent	93ee7a9340d64f20295aacc3fb6a22b759323280 (diff)
slub: free slabs without holding locks
There are two situations in which slub holds a lock while releasing pages:

  A. During kmem_cache_shrink()
  B. During kmem_cache_close()

For A, build a list while holding the lock and then release the pages later.
In case B we are the last remaining user of the slab, so there is no need to
take the list_lock.

After this patch all calls to the page allocator to free pages are done
without holding any spinlocks. kmem_cache_destroy() will still hold the
slub_lock semaphore.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
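The core of change A is a pattern worth spelling out: while list_lock is held, empty slabs are only moved onto a private list; the calls into the page allocator happen after the lock is dropped. Below is a minimal userspace C sketch of that pattern using a pthread mutex. All names here (struct cache, shrink_cache, discard_slab, ...) are illustrative stand-ins, not the SLUB code itself.

/*
 * Minimal sketch of "build a list under the lock, free it afterwards".
 * Illustrative only; not the SLUB implementation.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct slab {
	int inuse;			/* objects still allocated from this slab */
	struct slab *next;		/* singly linked partial list */
};

struct cache {
	pthread_mutex_t list_lock;
	struct slab *partial;		/* partially used slabs */
};

/* The expensive release work that must not run under list_lock. */
static void discard_slab(struct slab *s)
{
	free(s);
}

static void shrink_cache(struct cache *c)
{
	struct slab *s, *next, **pp;
	struct slab *to_free = NULL;	/* private list built under the lock */

	pthread_mutex_lock(&c->list_lock);
	for (pp = &c->partial; (s = *pp) != NULL; ) {
		if (s->inuse == 0) {
			*pp = s->next;		/* unlink from the partial list */
			s->next = to_free;	/* defer the actual free */
			to_free = s;
		} else {
			pp = &s->next;
		}
	}
	pthread_mutex_unlock(&c->list_lock);

	/* Release the empty slabs without holding any lock. */
	for (s = to_free; s != NULL; s = next) {
		next = s->next;
		discard_slab(s);
	}
}

int main(void)
{
	struct cache c = { .partial = NULL };

	pthread_mutex_init(&c.list_lock, NULL);
	for (int i = 0; i < 6; i++) {
		struct slab *s = malloc(sizeof(*s));
		s->inuse = i % 2;	/* every other slab is empty */
		s->next = c.partial;
		c.partial = s;
	}
	shrink_cache(&c);
	puts("empty slabs discarded outside the lock");

	/* Not part of the pattern: drop the remaining slabs. */
	for (struct slab *s = c.partial, *n; s; s = n) {
		n = s->next;
		free(s);
	}
	return 0;
}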
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 9f662d70eb47..30c4558acc8b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2970,13 +2970,13 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
+ * This is called from kmem_cache_close(). We must be the last thread
+ * using the cache and therefore we do not need to lock anymore.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-	unsigned long flags;
 	struct page *page, *h;
 
-	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			remove_partial(n, page);
@@ -2986,7 +2986,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 				"Objects remaining on kmem_cache_close()");
 		}
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
 /*
@@ -3020,6 +3019,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
+		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -3028,8 +3028,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	}
-	up_write(&slub_lock);
+	} else
+		up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -3347,23 +3347,23 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		 * list_lock. page->inuse here is the upper limit.
 		 */
 		list_for_each_entry_safe(page, t, &n->partial, lru) {
-			if (!page->inuse) {
-				remove_partial(n, page);
-				discard_slab(s, page);
-			} else {
-				list_move(&page->lru,
-					slabs_by_inuse + page->inuse);
-			}
+			list_move(&page->lru, slabs_by_inuse + page->inuse);
+			if (!page->inuse)
+				n->nr_partial--;
 		}
 
 		/*
 		 * Rebuild the partial list with the slabs filled up most
 		 * first and the least used slabs at the end.
 		 */
-		for (i = objects - 1; i >= 0; i--)
+		for (i = objects - 1; i > 0; i--)
 			list_splice(slabs_by_inuse + i, n->partial.prev);
 
 		spin_unlock_irqrestore(&n->list_lock, flags);
+
+		/* Release empty slabs */
+		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
+			discard_slab(s, page);
 	}
 
 	kfree(slabs_by_inuse);
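For reference, the rebuilt kmem_cache_shrink() loop in the last hunk is essentially a counting sort: each partial slab is dropped into a bucket indexed by its inuse count, the buckets are spliced back fullest-first, and bucket 0 (the empty slabs) is only released once the lock is gone. A rough userspace C sketch of that bucketing step, with illustrative names and a fixed objects-per-slab count, might look like this:

/*
 * Sketch of the bucket-by-inuse rebuild. Names and sizes are
 * illustrative; this is not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>

#define OBJECTS 4			/* stand-in for objects per slab */

struct slab {
	int inuse;			/* 0 .. OBJECTS-1 for a partial slab */
	struct slab *next;
};

static struct slab *partial;		/* guarded by list_lock in real code */

static void shrink(void)
{
	struct slab *buckets[OBJECTS] = { NULL };	/* slabs_by_inuse analogue */
	struct slab *s, *next;

	/* --- would run under list_lock: only pointer moves, no freeing --- */
	for (s = partial; s; s = next) {
		next = s->next;
		s->next = buckets[s->inuse];
		buckets[s->inuse] = s;
	}

	/* Rebuild the partial list fullest-first; skip bucket 0 (empty slabs). */
	partial = NULL;
	for (int i = 1; i < OBJECTS; i++) {
		for (s = buckets[i]; s; s = next) {
			next = s->next;
			s->next = partial;
			partial = s;
		}
	}
	/* --- list_lock would be dropped here --- */

	/* Release the empty slabs without holding any lock. */
	for (s = buckets[0]; s; s = next) {
		next = s->next;
		free(s);
	}
}

int main(void)
{
	for (int i = 0; i < 8; i++) {	/* inuse counts 0..3, twice each */
		struct slab *s = malloc(sizeof(*s));
		s->inuse = i % OBJECTS;
		s->next = partial;
		partial = s;
	}
	shrink();
	for (struct slab *s = partial; s; s = s->next)
		printf("slab with inuse=%d\n", s->inuse);
	return 0;
}

Running the sketch prints the remaining slabs in decreasing inuse order (3, 3, 2, 2, 1, 1), mirroring the "slabs filled up most first" ordering the kernel comment describes.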