author     Christoph Lameter <clameter@sgi.com>   2008-04-23 15:36:52 -0400
committer  Pekka Enberg <penberg@cs.helsinki.fi>  2008-04-27 11:26:18 -0400
commit     599870b175987008b5f5c82a70b89f751e12822e
tree       ba01b18de3d30978bc0e7fb05af278710eec9d39 /mm/slub.c
parent     d629d819579327267884a12de21ef6d4b539db88
slub: free_list() cleanup
free_list looked a bit screwy, so here is an attempt to clean it up.

free_list is only used for freeing partial lists. We do not need to return a
count if we decrement nr_partial within the function, which allows a
simplification of the whole thing.

The current version modifies nr_partial outside of the list_lock, which is
technically not correct. It was only ok because we should be the only user of
this slab cache at this point.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
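The locking rule the last paragraph of the message describes, that a counter
must be updated under the same lock as the list it counts, is easy to
demonstrate outside the kernel. The sketch below is a userspace analogue, not
kernel code: a pthread mutex stands in for n->list_lock, a hand-rolled singly
linked list for n->partial, and plain malloc/free for slab allocation; the
names free_partial, nr_partial and inuse are borrowed from the patch purely
for readability.

/*
 * Userspace analogue of the pattern this patch moves to: the counter
 * (nr_partial) is decremented under the same lock that protects the
 * list it counts, instead of being fixed up by the caller afterwards.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct slab {
	struct slab *next;
	int inuse;		/* objects still allocated from this slab */
};

struct node {
	pthread_mutex_t list_lock;
	struct slab *partial;	/* singly linked partial list */
	long nr_partial;	/* must track the list exactly */
};

/* Free all empty slabs on the partial list, as free_partial() does. */
static void free_partial(struct node *n)
{
	struct slab **pp, *page;

	pthread_mutex_lock(&n->list_lock);
	pp = &n->partial;
	while ((page = *pp) != NULL) {
		if (!page->inuse) {
			*pp = page->next;	/* list_del() */
			free(page);		/* discard_slab() */
			n->nr_partial--;	/* updated under the lock */
		} else {
			pp = &page->next;
		}
	}
	pthread_mutex_unlock(&n->list_lock);
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	for (int i = 0; i < 4; i++) {
		struct slab *s = malloc(sizeof(*s));
		s->inuse = i % 2;	/* two empty, two still in use */
		s->next = n.partial;
		n.partial = s;
		n.nr_partial++;
	}
	free_partial(&n);
	printf("nr_partial after free_partial: %ld\n", n.nr_partial); /* 2 */
	return 0;
}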
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  18
1 file changed, 7 insertions(+), 11 deletions(-)
@@ -2372,25 +2372,21 @@ const char *kmem_cache_name(struct kmem_cache *s)
 EXPORT_SYMBOL(kmem_cache_name);
 
 /*
- * Attempt to free all slabs on a node. Return the number of slabs we
- * were unable to free.
+ * Attempt to free all partial slabs on a node.
  */
-static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
-			struct list_head *list)
+static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-	int slabs_inuse = 0;
 	unsigned long flags;
 	struct page *page, *h;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry_safe(page, h, list, lru)
+	list_for_each_entry_safe(page, h, &n->partial, lru)
 		if (!page->inuse) {
 			list_del(&page->lru);
 			discard_slab(s, page);
-		} else
-			slabs_inuse++;
+			n->nr_partial--;
+		}
 	spin_unlock_irqrestore(&n->list_lock, flags);
-	return slabs_inuse;
 }
 
 /*
@@ -2407,8 +2403,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		n->nr_partial -= free_list(s, n, &n->partial);
-		if (slabs_node(s, node))
+		free_partial(s, n);
+		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
 	free_kmem_cache_nodes(s);
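Pieced together from the new-side lines of the first hunk, free_partial()
reads as follows after the patch; nothing here goes beyond what the hunk
already shows:

/*
 * Attempt to free all partial slabs on a node.
 */
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
{
	unsigned long flags;
	struct page *page, *h;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry_safe(page, h, &n->partial, lru)
		if (!page->inuse) {
			list_del(&page->lru);
			discard_slab(s, page);
			n->nr_partial--;
		}
	spin_unlock_irqrestore(&n->list_lock, flags);
}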