| author    | Christoph Lameter <cl@linux.com>  | 2010-09-28 09:10:28 -0400 |
|-----------|-----------------------------------|---------------------------|
| committer | Pekka Enberg <penberg@kernel.org> | 2010-10-02 03:44:10 -0400 |
| commit    | 62e346a83026a28526fc9799337bcc6154819f25 (patch) | |
| tree      | 04514b189c3005bca093149769a117117ec0dff0 | |
| parent    | f7cb1933621bce66a77f690776a16fe3ebbc4d58 (diff) | |
slub: extract common code to remove objects from partial list without locking
There are a couple of places where we repeat the same statements when removing
a page from the partial list. Consolidate them into __remove_partial().
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
| -rw-r--r-- | mm/slub.c | 19 |
1 file changed, 11 insertions(+), 8 deletions(-)
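For readers outside the kernel tree, here is a minimal userspace sketch of the shape this patch introduces: a lockless __remove_partial() helper that is shared between a locking wrapper and call sites that already hold the list lock. The struct node, the pthread mutex, and the list helpers below are simplified stand-ins, not the kernel's kmem_cache_node or list API; only the refactoring pattern mirrors the patch.

```c
#include <pthread.h>
#include <stdio.h>

/* Toy doubly-linked list, standing in for the kernel's list API. */
struct list_head { struct list_head *next, *prev; };

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct page { struct list_head lru; };

/* Stand-in for kmem_cache_node: a lock, a counter, a partial list. */
struct node {
	pthread_mutex_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
};

/* Lockless helper: callers must already hold n->list_lock. */
static inline void __remove_partial(struct node *n, struct page *page)
{
	list_del(&page->lru);
	n->nr_partial--;
}

/* Locking wrapper for callers that do not yet hold the lock. */
static void remove_partial(struct node *n, struct page *page)
{
	pthread_mutex_lock(&n->list_lock);
	__remove_partial(n, page);
	pthread_mutex_unlock(&n->list_lock);
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 0,
			  { &n.partial, &n.partial } };
	struct page p;

	list_add(&p.lru, &n.partial);	/* put one page on the partial list */
	n.nr_partial++;

	remove_partial(&n, &p);		/* unlocked caller takes the wrapper */
	printf("nr_partial = %lu\n", n.nr_partial);	/* prints 0 */
	return 0;
}
```

The double-underscore naming follows the kernel convention the patch relies on: __remove_partial() documents that the caller owns the lock, so already-locked paths (like lock_and_freeze_slab() and kmem_cache_shrink() in the diff below) can call it directly without taking the lock twice.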
@@ -1310,13 +1310,19 @@ static void add_partial(struct kmem_cache_node *n,
 	spin_unlock(&n->list_lock);
 }
 
+static inline void __remove_partial(struct kmem_cache_node *n,
+					struct page *page)
+{
+	list_del(&page->lru);
+	n->nr_partial--;
+}
+
 static void remove_partial(struct kmem_cache *s, struct page *page)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
 	spin_lock(&n->list_lock);
-	list_del(&page->lru);
-	n->nr_partial--;
+	__remove_partial(n, page);
 	spin_unlock(&n->list_lock);
 }
 
@@ -1329,8 +1335,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 					struct page *page)
 {
 	if (slab_trylock(page)) {
-		list_del(&page->lru);
-		n->nr_partial--;
+		__remove_partial(n, page);
 		__SetPageSlubFrozen(page);
 		return 1;
 	}
@@ -2462,9 +2467,8 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			list_del(&page->lru);
+			__remove_partial(n, page);
 			discard_slab(s, page);
-			n->nr_partial--;
 		} else {
 			list_slab_objects(s, page,
 				"Objects remaining on kmem_cache_close()");
@@ -2822,8 +2826,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
 		 * may have freed the last object and be
 		 * waiting to release the slab.
 		 */
-		list_del(&page->lru);
-		n->nr_partial--;
+		__remove_partial(n, page);
 		slab_unlock(page);
 		discard_slab(s, page);
 	} else {
